From 23be4b7e1f1d4de8727984fcba69875614b1b4b1 Mon Sep 17 00:00:00 2001 From: asobolev Date: Mon, 18 Aug 2025 17:46:20 +0200 Subject: [PATCH 01/37] first taf draft + tests --- test_automation_framework/.flake8 | 4 + test_automation_framework/.gitignore | 5 + test_automation_framework/.pdm-python | 1 + .../.pre-commit-config.yaml | 12 + test_automation_framework/README.md | 47 ++ .../config/defaults.yaml | 7 + test_automation_framework/conftest.py | 28 ++ .../helpers/auth/auth_service.py | 50 +++ .../helpers/base_client/base_client.py | 73 +++ test_automation_framework/pdm.lock | 425 ++++++++++++++++++ test_automation_framework/pyproject.toml | 15 + test_automation_framework/settings.py | 33 ++ .../tests/test_base_api.py | 31 ++ 13 files changed, 731 insertions(+) create mode 100644 test_automation_framework/.flake8 create mode 100644 test_automation_framework/.gitignore create mode 100644 test_automation_framework/.pdm-python create mode 100644 test_automation_framework/.pre-commit-config.yaml create mode 100644 test_automation_framework/README.md create mode 100644 test_automation_framework/config/defaults.yaml create mode 100644 test_automation_framework/conftest.py create mode 100644 test_automation_framework/helpers/auth/auth_service.py create mode 100644 test_automation_framework/helpers/base_client/base_client.py create mode 100644 test_automation_framework/pdm.lock create mode 100644 test_automation_framework/pyproject.toml create mode 100644 test_automation_framework/settings.py create mode 100644 test_automation_framework/tests/test_base_api.py diff --git a/test_automation_framework/.flake8 b/test_automation_framework/.flake8 new file mode 100644 index 000000000..37c03609e --- /dev/null +++ b/test_automation_framework/.flake8 @@ -0,0 +1,4 @@ +[flake8] +max-line-length = 120 +ignore = E203, W503 +exclude = .git,__pycache__,build,dist diff --git a/test_automation_framework/.gitignore b/test_automation_framework/.gitignore new file mode 100644 index 
000000000..2d08c15ef --- /dev/null +++ b/test_automation_framework/.gitignore @@ -0,0 +1,5 @@ +.env +__pycache__/ +*.pyc +.vscode/ +.idea/ \ No newline at end of file diff --git a/test_automation_framework/.pdm-python b/test_automation_framework/.pdm-python new file mode 100644 index 000000000..cb1f7523e --- /dev/null +++ b/test_automation_framework/.pdm-python @@ -0,0 +1 @@ +/Users/Aleksei_Sobolev/Work/BadgerDoc/taf/.venv/bin/python \ No newline at end of file diff --git a/test_automation_framework/.pre-commit-config.yaml b/test_automation_framework/.pre-commit-config.yaml new file mode 100644 index 000000000..f61617e7c --- /dev/null +++ b/test_automation_framework/.pre-commit-config.yaml @@ -0,0 +1,12 @@ +repos: + - repo: https://github.com/psf/black + rev: 25.1.0 + hooks: + - id: black + language_version: python3 + + - repo: https://github.com/pycqa/flake8 + rev: 7.3.0 + hooks: + - id: flake8 + additional_dependencies: [] diff --git a/test_automation_framework/README.md b/test_automation_framework/README.md new file mode 100644 index 000000000..b8d16ec11 --- /dev/null +++ b/test_automation_framework/README.md @@ -0,0 +1,47 @@ +# BadgerDoc Test Automation Framework + +This project is a Python-based **test automation framework** built with [pytest](https://docs.pytest.org/). + +## Getting Started + +### 1. Install PDM +Make sure you have [PDM](https://pdm-project.org/latest/#installation) installed: + +```bash +brew install pdm # macOS +# or +pip install pdm +``` + +Verify installation: + +```bash +pdm --version +``` + +### 2. Clone the repository + +```bash +git clone https://github.com/epam/badgerdoc.git +cd badgerdoc +``` + +### 3. Install dependencies + +```bash +pdm install +``` + +### 4. Pre-commit hooks + +Enable pre-commit to enforce style and linting: +```bash +pre-commit install +``` +Now hooks will run automatically before each commit. + +### 5. 
Run tests + +```bash +pdm run pytest +``` \ No newline at end of file diff --git a/test_automation_framework/config/defaults.yaml b/test_automation_framework/config/defaults.yaml new file mode 100644 index 000000000..10cd5f9f2 --- /dev/null +++ b/test_automation_framework/config/defaults.yaml @@ -0,0 +1,7 @@ +BASE_URL: "http://demo.badgerdoc.com:8080" +TIMEOUT_SECONDS: 30 +MAX_WORKERS: 4 +USE_MOCK_LLM: true +LOG_LEVEL: "INFO" +API_USER: "user@example.com" +API_PASS: "changeme" diff --git a/test_automation_framework/conftest.py b/test_automation_framework/conftest.py new file mode 100644 index 000000000..baec0b9be --- /dev/null +++ b/test_automation_framework/conftest.py @@ -0,0 +1,28 @@ +import pytest +from settings import load_settings +from helpers.auth.auth_service import AuthService +from helpers.base_client.base_client import BaseClient + + +@pytest.fixture(scope="session") +def base_client(settings): + client = BaseClient(settings.BASE_URL, timeout=10) + yield client + client.close() + + +@pytest.fixture(scope="session") +def auth_service(base_client) -> AuthService: + return AuthService(base_client) + + +@pytest.fixture(scope="session") +def auth_token(auth_service, settings) -> tuple[str, str]: + return auth_service.get_token( + settings.API_USER, settings.API_PASS.get_secret_value() + ) + + +@pytest.fixture(scope="session") +def settings(): + return load_settings() diff --git a/test_automation_framework/helpers/auth/auth_service.py b/test_automation_framework/helpers/auth/auth_service.py new file mode 100644 index 000000000..d9c3b3b0a --- /dev/null +++ b/test_automation_framework/helpers/auth/auth_service.py @@ -0,0 +1,50 @@ +from __future__ import annotations +from typing import Optional +from pydantic import BaseModel + +from helpers.base_client.base_client import BaseClient + + +class TokenResponse(BaseModel): + access_token: str + refresh_token: str + id_token: Optional[str] = None + scope: Optional[str] = None + session_state: Optional[str] = None + 
token_type: Optional[str] = None + expires_in: Optional[int] = None + + +class AuthService: + def __init__(self, client: BaseClient) -> None: + self.client = client + + def get_token( + self, username: str, password: str, client_id: str = "admin-cli" + ) -> tuple[str, str]: + resp = self.client.post( + "/users/token", + data={ + "grant_type": "password", + "username": username, + "password": password, + "client_id": client_id, + }, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + ) + result = TokenResponse.model_validate(resp.json()) + return result.access_token, result.refresh_token + + def refresh_token( + self, refresh_token: str, client_id: str = "admin-cli" + ) -> tuple[str, str]: + resp = self.client.post( + "/users/refresh_token", + json={ + "grant_type": "refresh_token", + "client_id": client_id, + "refresh_token": refresh_token, + }, + ) + result = TokenResponse.model_validate(resp.json()) + return result.access_token, result.refresh_token diff --git a/test_automation_framework/helpers/base_client/base_client.py b/test_automation_framework/helpers/base_client/base_client.py new file mode 100644 index 000000000..11fc13218 --- /dev/null +++ b/test_automation_framework/helpers/base_client/base_client.py @@ -0,0 +1,73 @@ +from __future__ import annotations +from typing import Any, Optional +import httpx +import time +import logging + +logger = logging.getLogger(__name__) + + +class HTTPError(RuntimeError): + def __init__( + self, + message: str, + status_code: Optional[int] = None, + body: Optional[str] = None, + ): + super().__init__(message) + self.status_code = status_code + self.body = body + + +class BaseClient: + def __init__(self, base_url: str, timeout: int = 30) -> None: + self.base_url = base_url.rstrip("/") + self.timeout = timeout + self._client = httpx.Client(base_url=self.base_url, timeout=self.timeout) + + def _request(self, method: str, path: str, **kwargs: Any) -> httpx.Response: + rel_path = path if path.startswith("/") else 
"/" + path + start = time.perf_counter() + try: + resp = self._client.request(method, rel_path, **kwargs) + resp.raise_for_status() + logger.debug( + f"HTTP {method} {self.base_url}{rel_path} -> {resp.status_code} in {time.perf_counter() - start:.3f}s" + ) + return resp + except httpx.HTTPStatusError as exc: + resp = exc.response + logger.error( + f"Bad response: {resp.status_code} for {method} {self.base_url}{rel_path} - body: {resp.text[:500]}" + ) + raise HTTPError( + f"{method} {self.base_url}{rel_path} -> {resp.status_code}", + status_code=resp.status_code, + body=resp.text, + ) from exc + except httpx.RequestError as exc: + logger.exception(f"Request failed: {method} {self.base_url}{rel_path}") + raise HTTPError( + f"request failed: {method} {self.base_url}{rel_path}" + ) from exc + + def get(self, path: str, **kwargs: Any) -> httpx.Response: + return self._request("GET", path, **kwargs) + + def post(self, path: str, **kwargs: Any) -> httpx.Response: + return self._request("POST", path, **kwargs) + + def put(self, path: str, **kwargs: Any) -> httpx.Response: + return self._request("PUT", path, **kwargs) + + def delete(self, path: str, **kwargs: Any) -> httpx.Response: + return self._request("DELETE", path, **kwargs) + + def close(self) -> None: + self._client.close() + + def __enter__(self) -> "BaseClient": + return self + + def __exit__(self, exc_type, exc, tb) -> None: + self.close() diff --git a/test_automation_framework/pdm.lock b/test_automation_framework/pdm.lock new file mode 100644 index 000000000..d597e7a73 --- /dev/null +++ b/test_automation_framework/pdm.lock @@ -0,0 +1,425 @@ +# This file is @generated by PDM. +# It is not intended for manual editing. 
+ +[metadata] +groups = ["default"] +strategy = ["inherit_metadata"] +lock_version = "4.5.0" +content_hash = "sha256:d56b4fa3df2a34dc34169a0a8ae56c73e69c5bec37976962b9e4dfc446248a24" + +[[metadata.targets]] +requires_python = "==3.13.*" + +[[package]] +name = "annotated-types" +version = "0.7.0" +requires_python = ">=3.8" +summary = "Reusable constraint types to use with typing.Annotated" +groups = ["default"] +dependencies = [ + "typing-extensions>=4.0.0; python_version < \"3.9\"", +] +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[[package]] +name = "anyio" +version = "4.10.0" +requires_python = ">=3.9" +summary = "High-level concurrency and networking framework on top of asyncio or Trio" +groups = ["default"] +dependencies = [ + "exceptiongroup>=1.0.2; python_version < \"3.11\"", + "idna>=2.8", + "sniffio>=1.1", + "typing-extensions>=4.5; python_version < \"3.13\"", +] +files = [ + {file = "anyio-4.10.0-py3-none-any.whl", hash = "sha256:60e474ac86736bbfd6f210f7a61218939c318f43f9972497381f1c5e930ed3d1"}, + {file = "anyio-4.10.0.tar.gz", hash = "sha256:3f3fae35c96039744587aa5b8371e7e8e603c0702999535961dd336026973ba6"}, +] + +[[package]] +name = "certifi" +version = "2025.8.3" +requires_python = ">=3.7" +summary = "Python package for providing Mozilla's CA Bundle." +groups = ["default"] +files = [ + {file = "certifi-2025.8.3-py3-none-any.whl", hash = "sha256:f6c12493cfb1b06ba2ff328595af9350c65d6644968e5d3a2ffd78699af217a5"}, + {file = "certifi-2025.8.3.tar.gz", hash = "sha256:e564105f78ded564e3ae7c923924435e1daa7463faeab5bb932bc53ffae63407"}, +] + +[[package]] +name = "cfgv" +version = "3.4.0" +requires_python = ">=3.8" +summary = "Validate configuration and produce human readable error messages." 
+groups = ["default"] +files = [ + {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"}, + {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"}, +] + +[[package]] +name = "colorama" +version = "0.4.6" +requires_python = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +summary = "Cross-platform colored terminal text." +groups = ["default"] +marker = "sys_platform == \"win32\"" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "distlib" +version = "0.4.0" +summary = "Distribution utilities" +groups = ["default"] +files = [ + {file = "distlib-0.4.0-py2.py3-none-any.whl", hash = "sha256:9659f7d87e46584a30b5780e43ac7a2143098441670ff0a49d5f9034c54a6c16"}, + {file = "distlib-0.4.0.tar.gz", hash = "sha256:feec40075be03a04501a973d81f633735b4b69f98b05450592310c0f401a4e0d"}, +] + +[[package]] +name = "dotenv" +version = "0.9.9" +summary = "Deprecated package" +groups = ["default"] +dependencies = [ + "python-dotenv", +] +files = [ + {file = "dotenv-0.9.9-py2.py3-none-any.whl", hash = "sha256:29cf74a087b31dafdb5a446b6d7e11cbce8ed2741540e2339c69fbef92c94ce9"}, +] + +[[package]] +name = "filelock" +version = "3.19.1" +requires_python = ">=3.9" +summary = "A platform independent file lock." 
+groups = ["default"] +files = [ + {file = "filelock-3.19.1-py3-none-any.whl", hash = "sha256:d38e30481def20772f5baf097c122c3babc4fcdb7e14e57049eb9d88c6dc017d"}, + {file = "filelock-3.19.1.tar.gz", hash = "sha256:66eda1888b0171c998b35be2bcc0f6d75c388a7ce20c3f3f37aa8e96c2dddf58"}, +] + +[[package]] +name = "h11" +version = "0.16.0" +requires_python = ">=3.8" +summary = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +groups = ["default"] +files = [ + {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"}, + {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"}, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +requires_python = ">=3.8" +summary = "A minimal low-level HTTP client." +groups = ["default"] +dependencies = [ + "certifi", + "h11>=0.16", +] +files = [ + {file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"}, + {file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"}, +] + +[[package]] +name = "httpx" +version = "0.28.1" +requires_python = ">=3.8" +summary = "The next generation HTTP client." 
+groups = ["default"] +dependencies = [ + "anyio", + "certifi", + "httpcore==1.*", + "idna", +] +files = [ + {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, + {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, +] + +[[package]] +name = "identify" +version = "2.6.13" +requires_python = ">=3.9" +summary = "File identification library for Python" +groups = ["default"] +files = [ + {file = "identify-2.6.13-py2.py3-none-any.whl", hash = "sha256:60381139b3ae39447482ecc406944190f690d4a2997f2584062089848361b33b"}, + {file = "identify-2.6.13.tar.gz", hash = "sha256:da8d6c828e773620e13bfa86ea601c5a5310ba4bcd65edf378198b56a1f9fb32"}, +] + +[[package]] +name = "idna" +version = "3.10" +requires_python = ">=3.6" +summary = "Internationalized Domain Names in Applications (IDNA)" +groups = ["default"] +files = [ + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, +] + +[[package]] +name = "iniconfig" +version = "2.1.0" +requires_python = ">=3.8" +summary = "brain-dead simple config-ini parsing" +groups = ["default"] +files = [ + {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, + {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, +] + +[[package]] +name = "nodeenv" +version = "1.9.1" +requires_python = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +summary = "Node.js virtual environment builder" +groups = ["default"] +files = [ + {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, + {file = "nodeenv-1.9.1.tar.gz", hash = 
"sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, +] + +[[package]] +name = "packaging" +version = "25.0" +requires_python = ">=3.8" +summary = "Core utilities for Python packages" +groups = ["default"] +files = [ + {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, + {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, +] + +[[package]] +name = "platformdirs" +version = "4.3.8" +requires_python = ">=3.9" +summary = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." +groups = ["default"] +files = [ + {file = "platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4"}, + {file = "platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc"}, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +requires_python = ">=3.9" +summary = "plugin and hook calling mechanisms for python" +groups = ["default"] +files = [ + {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, + {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, +] + +[[package]] +name = "pre-commit" +version = "4.3.0" +requires_python = ">=3.9" +summary = "A framework for managing and maintaining multi-language pre-commit hooks." 
+groups = ["default"] +dependencies = [ + "cfgv>=2.0.0", + "identify>=1.0.0", + "nodeenv>=0.11.1", + "pyyaml>=5.1", + "virtualenv>=20.10.0", +] +files = [ + {file = "pre_commit-4.3.0-py2.py3-none-any.whl", hash = "sha256:2b0747ad7e6e967169136edffee14c16e148a778a54e4f967921aa1ebf2308d8"}, + {file = "pre_commit-4.3.0.tar.gz", hash = "sha256:499fe450cc9d42e9d58e606262795ecb64dd05438943c62b66f6a8673da30b16"}, +] + +[[package]] +name = "pydantic" +version = "2.11.7" +requires_python = ">=3.9" +summary = "Data validation using Python type hints" +groups = ["default"] +dependencies = [ + "annotated-types>=0.6.0", + "pydantic-core==2.33.2", + "typing-extensions>=4.12.2", + "typing-inspection>=0.4.0", +] +files = [ + {file = "pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b"}, + {file = "pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db"}, +] + +[[package]] +name = "pydantic-core" +version = "2.33.2" +requires_python = ">=3.9" +summary = "Core functionality for Pydantic validation and serialization" +groups = ["default"] +dependencies = [ + "typing-extensions!=4.7.0,>=4.6.0", +] +files = [ + {file = "pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f"}, + {file = "pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9"}, + {file = "pydantic_core-2.33.2.tar.gz", 
hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc"}, +] + +[[package]] +name = "pydantic-settings" +version = "2.10.1" +requires_python = ">=3.9" +summary = "Settings management using Pydantic" +groups = ["default"] +dependencies = [ + "pydantic>=2.7.0", + "python-dotenv>=0.21.0", + "typing-inspection>=0.4.0", +] +files = [ + {file = "pydantic_settings-2.10.1-py3-none-any.whl", hash = "sha256:a60952460b99cf661dc25c29c0ef171721f98bfcb52ef8d9ea4c943d7c8cc796"}, + {file = "pydantic_settings-2.10.1.tar.gz", hash = "sha256:06f0062169818d0f5524420a360d632d5857b83cffd4d42fe29597807a1614ee"}, +] + +[[package]] +name = "pygments" +version = "2.19.2" +requires_python = ">=3.8" +summary = "Pygments is a syntax highlighting package written in Python." +groups = ["default"] +files = [ + {file = "pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b"}, + {file = "pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887"}, +] + +[[package]] +name = "pytest" +version = "8.4.1" +requires_python = ">=3.9" +summary = "pytest: simple powerful testing with Python" +groups = ["default"] +dependencies = [ + "colorama>=0.4; sys_platform == \"win32\"", + "exceptiongroup>=1; python_version < \"3.11\"", + "iniconfig>=1", + "packaging>=20", + "pluggy<2,>=1.5", + "pygments>=2.7.2", + "tomli>=1; python_version < \"3.11\"", +] +files = [ + {file = "pytest-8.4.1-py3-none-any.whl", hash = "sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7"}, + {file = "pytest-8.4.1.tar.gz", hash = "sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c"}, +] + +[[package]] +name = "python-dotenv" +version = "1.1.1" +requires_python = ">=3.9" +summary = "Read key-value pairs from a .env file and set them as environment variables" +groups = ["default"] +files = [ + {file = "python_dotenv-1.1.1-py3-none-any.whl", hash = 
"sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc"}, + {file = "python_dotenv-1.1.1.tar.gz", hash = "sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +requires_python = ">=3.8" +summary = "YAML parser and emitter for Python" +groups = ["default"] +files = [ + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +requires_python = ">=3.7" +summary = "Sniff out which async library your code is running under" +groups = 
["default"] +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + +[[package]] +name = "typing-extensions" +version = "4.14.1" +requires_python = ">=3.9" +summary = "Backported and Experimental Type Hints for Python 3.9+" +groups = ["default"] +files = [ + {file = "typing_extensions-4.14.1-py3-none-any.whl", hash = "sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76"}, + {file = "typing_extensions-4.14.1.tar.gz", hash = "sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36"}, +] + +[[package]] +name = "typing-inspection" +version = "0.4.1" +requires_python = ">=3.9" +summary = "Runtime typing introspection tools" +groups = ["default"] +dependencies = [ + "typing-extensions>=4.12.0", +] +files = [ + {file = "typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51"}, + {file = "typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28"}, +] + +[[package]] +name = "virtualenv" +version = "20.34.0" +requires_python = ">=3.8" +summary = "Virtual Python Environment builder" +groups = ["default"] +dependencies = [ + "distlib<1,>=0.3.7", + "filelock<4,>=3.12.2", + "importlib-metadata>=6.6; python_version < \"3.8\"", + "platformdirs<5,>=3.9.1", + "typing-extensions>=4.13.2; python_version < \"3.11\"", +] +files = [ + {file = "virtualenv-20.34.0-py3-none-any.whl", hash = "sha256:341f5afa7eee943e4984a9207c025feedd768baff6753cd660c857ceb3e36026"}, + {file = "virtualenv-20.34.0.tar.gz", hash = "sha256:44815b2c9dee7ed86e387b842a84f20b93f7f417f95886ca1996a72a4138eb1a"}, +] diff --git a/test_automation_framework/pyproject.toml b/test_automation_framework/pyproject.toml new file mode 100644 index 000000000..67db4bda5 --- 
/dev/null +++ b/test_automation_framework/pyproject.toml @@ -0,0 +1,15 @@ +[project] +name = "badgerdoc_taf" +version = "0.1.0" +description = "Default template for PDM package" +authors = [ + {name = "asobolev", email = "aleksei_sobolev@epam.com"}, +] +dependencies = ["PyYAML==6.0.2", "dotenv==0.9.9", "httpx==0.28.1", "pre-commit==4.3.0", "pydantic-settings==2.10.1", "pydantic==2.11.7", "pytest==8.4.1"] +requires-python = "==3.13.*" +readme = "README.md" +license = {text = "MIT"} + + +[tool.pdm] +distribution = false diff --git a/test_automation_framework/settings.py b/test_automation_framework/settings.py new file mode 100644 index 000000000..8f5a59398 --- /dev/null +++ b/test_automation_framework/settings.py @@ -0,0 +1,33 @@ +import yaml +from pathlib import Path +from pydantic_settings import BaseSettings, SettingsConfigDict +from pydantic import SecretStr + +ROOT = Path(__file__).parent +DEFAULTS_PATH = ROOT / "config" / "defaults.yaml" + + +class Settings(BaseSettings): + BASE_URL: str + API_USER: str + API_PASS: SecretStr + TIMEOUT_SECONDS: int = 30 + MAX_WORKERS: int = 4 + USE_MOCK_LLM: bool = True + LOG_LEVEL: str = "INFO" + LLM_API_KEY: str | None = None + + model_config = SettingsConfigDict(env_file=".env", env_file_encoding="utf-8") + + +def load_settings() -> Settings: + with open(DEFAULTS_PATH, "r") as f: + yaml_defaults = yaml.safe_load(f) + + from dotenv import dotenv_values + + env_data = dotenv_values(".env") + + merged = {**yaml_defaults, **{k: v for k, v in env_data.items() if v is not None}} + + return Settings(**merged) diff --git a/test_automation_framework/tests/test_base_api.py b/test_automation_framework/tests/test_base_api.py new file mode 100644 index 000000000..5c760ea8b --- /dev/null +++ b/test_automation_framework/tests/test_base_api.py @@ -0,0 +1,31 @@ +from logging import getLogger +import pytest +from helpers.base_client.base_client import HTTPError + +logger = getLogger(__name__) + + +class TestAuthAPI: + def test_basic_auth(self, 
auth_token): + access_token, refresh_token = auth_token + assert access_token, "No access_token found!" + assert refresh_token, "No refresh_token found!" + + def test_wrong_creds(self, auth_service): + with pytest.raises(HTTPError) as e: + auth_service.get_token("wrong", "wrong") + assert ( + e.value.status_code == 401 + ), f"Expected 401 but got {e.value.status_code}: {e.value.body}" + + def test_refresh_token(self, auth_token, auth_service): + access_token, refresh_token = auth_token + new_access_token, new_refresh_token = auth_service.refresh_token( + refresh_token=refresh_token + ) + assert ( + new_access_token != access_token + ), "Old access token is the same as new access token!" + assert ( + new_refresh_token != refresh_token + ), "Old refresh token is the same as new refresh token!" From 2d86d0d11cbbe989da2f657b24a71e628890ac60 Mon Sep 17 00:00:00 2001 From: asobolev Date: Mon, 18 Aug 2025 17:49:17 +0200 Subject: [PATCH 02/37] removed an excessive file --- test_automation_framework/.pdm-python | 1 - 1 file changed, 1 deletion(-) delete mode 100644 test_automation_framework/.pdm-python diff --git a/test_automation_framework/.pdm-python b/test_automation_framework/.pdm-python deleted file mode 100644 index cb1f7523e..000000000 --- a/test_automation_framework/.pdm-python +++ /dev/null @@ -1 +0,0 @@ -/Users/Aleksei_Sobolev/Work/BadgerDoc/taf/.venv/bin/python \ No newline at end of file From ab06e3e7479f09c7e5b61d976ebf6fe47628ba7a Mon Sep 17 00:00:00 2001 From: asobolev Date: Wed, 20 Aug 2025 16:18:00 +0200 Subject: [PATCH 03/37] menu and search tests --- .../.pre-commit-config.yaml | 2 + test_automation_framework/conftest.py | 4 +- .../helpers/auth/auth_service.py | 8 +- .../helpers/base_client/base_client.py | 4 +- .../helpers/datasets/dataset_client.py | 32 +++++ .../helpers/menu/menu_client.py | 19 +++ test_automation_framework/settings.py | 5 +- .../tests/test_base_api.py | 111 ++++++++++++++++-- 8 files changed, 161 insertions(+), 24 deletions(-) create 
mode 100644 test_automation_framework/helpers/datasets/dataset_client.py create mode 100644 test_automation_framework/helpers/menu/menu_client.py diff --git a/test_automation_framework/.pre-commit-config.yaml b/test_automation_framework/.pre-commit-config.yaml index f61617e7c..f1d963b4e 100644 --- a/test_automation_framework/.pre-commit-config.yaml +++ b/test_automation_framework/.pre-commit-config.yaml @@ -3,10 +3,12 @@ repos: rev: 25.1.0 hooks: - id: black + args: [--line-length=120] language_version: python3 - repo: https://github.com/pycqa/flake8 rev: 7.3.0 hooks: - id: flake8 + args: [--config=test_automation_framework/.flake8] additional_dependencies: [] diff --git a/test_automation_framework/conftest.py b/test_automation_framework/conftest.py index baec0b9be..bfdcbfd22 100644 --- a/test_automation_framework/conftest.py +++ b/test_automation_framework/conftest.py @@ -18,9 +18,7 @@ def auth_service(base_client) -> AuthService: @pytest.fixture(scope="session") def auth_token(auth_service, settings) -> tuple[str, str]: - return auth_service.get_token( - settings.API_USER, settings.API_PASS.get_secret_value() - ) + return auth_service.get_token(settings.API_USER, settings.API_PASS.get_secret_value()) @pytest.fixture(scope="session") diff --git a/test_automation_framework/helpers/auth/auth_service.py b/test_automation_framework/helpers/auth/auth_service.py index d9c3b3b0a..5c5ffecbc 100644 --- a/test_automation_framework/helpers/auth/auth_service.py +++ b/test_automation_framework/helpers/auth/auth_service.py @@ -19,9 +19,7 @@ class AuthService: def __init__(self, client: BaseClient) -> None: self.client = client - def get_token( - self, username: str, password: str, client_id: str = "admin-cli" - ) -> tuple[str, str]: + def get_token(self, username: str, password: str, client_id: str = "admin-cli") -> tuple[str, str]: resp = self.client.post( "/users/token", data={ @@ -35,9 +33,7 @@ def get_token( result = TokenResponse.model_validate(resp.json()) return 
result.access_token, result.refresh_token - def refresh_token( - self, refresh_token: str, client_id: str = "admin-cli" - ) -> tuple[str, str]: + def refresh_token(self, refresh_token: str, client_id: str = "admin-cli") -> tuple[str, str]: resp = self.client.post( "/users/refresh_token", json={ diff --git a/test_automation_framework/helpers/base_client/base_client.py b/test_automation_framework/helpers/base_client/base_client.py index 11fc13218..06b23bcd2 100644 --- a/test_automation_framework/helpers/base_client/base_client.py +++ b/test_automation_framework/helpers/base_client/base_client.py @@ -47,9 +47,7 @@ def _request(self, method: str, path: str, **kwargs: Any) -> httpx.Response: ) from exc except httpx.RequestError as exc: logger.exception(f"Request failed: {method} {self.base_url}{rel_path}") - raise HTTPError( - f"request failed: {method} {self.base_url}{rel_path}" - ) from exc + raise HTTPError(f"request failed: {method} {self.base_url}{rel_path}") from exc def get(self, path: str, **kwargs: Any) -> httpx.Response: return self._request("GET", path, **kwargs) diff --git a/test_automation_framework/helpers/datasets/dataset_client.py b/test_automation_framework/helpers/datasets/dataset_client.py new file mode 100644 index 000000000..d2ebd2957 --- /dev/null +++ b/test_automation_framework/helpers/datasets/dataset_client.py @@ -0,0 +1,32 @@ +from __future__ import annotations +from helpers.base_client.base_client import BaseClient + + +class DatasetClient(BaseClient): + def __init__(self, base_url: str, token: str, tenant: str) -> None: + super().__init__(base_url) + self._token = token + self._tenant = tenant + + def search( + self, + page_num: int = 1, + page_size: int = 100, + filters: list[dict] | None = None, + sorting: list[dict] | None = None, + ) -> dict: + payload = { + "pagination": {"page_num": page_num, "page_size": page_size}, + "filters": filters or [], + "sorting": sorting or [{"direction": "asc", "field": "name"}], + } + resp = self.post( + 
"/assets/datasets/search", + json=payload, + headers={ + "Authorization": f"Bearer {self._token}", + "X-Current-Tenant": self._tenant, + "Content-Type": "application/json", + }, + ) + return resp.json() diff --git a/test_automation_framework/helpers/menu/menu_client.py b/test_automation_framework/helpers/menu/menu_client.py new file mode 100644 index 000000000..7861b2206 --- /dev/null +++ b/test_automation_framework/helpers/menu/menu_client.py @@ -0,0 +1,19 @@ +from __future__ import annotations +from helpers.base_client.base_client import BaseClient + + +class MenuClient(BaseClient): + def __init__(self, base_url: str, token: str, tenant: str) -> None: + super().__init__(base_url) + self._token = token + self._tenant = tenant + + def get_menu(self) -> list[dict]: + resp = self.get( + "/core/menu", + headers={ + "Authorization": f"Bearer {self._token}", + "X-Current-Tenant": self._tenant, + }, + ) + return resp.json() diff --git a/test_automation_framework/settings.py b/test_automation_framework/settings.py index 8f5a59398..13076ce2f 100644 --- a/test_automation_framework/settings.py +++ b/test_automation_framework/settings.py @@ -28,6 +28,9 @@ def load_settings() -> Settings: env_data = dotenv_values(".env") - merged = {**yaml_defaults, **{k: v for k, v in env_data.items() if v is not None}} + merged = { + **yaml_defaults, + **{k: v for k, v in env_data.items() if v is not None}, + } return Settings(**merged) diff --git a/test_automation_framework/tests/test_base_api.py b/test_automation_framework/tests/test_base_api.py index 5c760ea8b..6e669286e 100644 --- a/test_automation_framework/tests/test_base_api.py +++ b/test_automation_framework/tests/test_base_api.py @@ -1,6 +1,9 @@ from logging import getLogger import pytest from helpers.base_client.base_client import HTTPError +from helpers.menu.menu_client import MenuClient +from helpers.datasets.dataset_client import DatasetClient +from datetime import datetime logger = getLogger(__name__) @@ -14,18 +17,104 @@ def 
test_basic_auth(self, auth_token): def test_wrong_creds(self, auth_service): with pytest.raises(HTTPError) as e: auth_service.get_token("wrong", "wrong") - assert ( - e.value.status_code == 401 - ), f"Expected 401 but got {e.value.status_code}: {e.value.body}" + assert e.value.status_code == 401, f"Expected 401 but got {e.value.status_code}: {e.value.body}" def test_refresh_token(self, auth_token, auth_service): access_token, refresh_token = auth_token - new_access_token, new_refresh_token = auth_service.refresh_token( - refresh_token=refresh_token - ) - assert ( - new_access_token != access_token - ), "Old access token is the same as new access token!" + new_access_token, new_refresh_token = auth_service.refresh_token(refresh_token=refresh_token) + assert new_access_token != access_token, "Old access token is the same as new access token!" + assert new_refresh_token != refresh_token, "Old refresh token is the same as new refresh token!" + + +class TestAPI: + def test_menu(self, auth_token, settings): + access_token, _ = auth_token + tenant = "demo-badgerdoc" + menu_client = MenuClient(settings.BASE_URL, access_token, tenant) + menu = menu_client.get_menu() + + assert isinstance(menu, list), "Menu is expected to be a list!" + assert len(menu), "Menu should have been returned!" 
+ + required_keys = { + "name", + "badgerdoc_path", + "is_external", + "is_iframe", + "url", + "children", + } + for item in menu: + assert required_keys <= item.keys(), f"Menu item missing keys: {item}" + + first_item = menu[0] + assert isinstance(first_item["name"], str) + assert isinstance(first_item["badgerdoc_path"], str) + assert isinstance(first_item["is_external"], bool) + assert isinstance(first_item["children"], (list, type(None))) + + expected_names = {"Documents", "My Tasks", "Jobs", "Settings"} + actual_names = {item["name"] for item in menu} + assert expected_names <= actual_names, f"Missing expected menu items: {expected_names - actual_names}" + + settings_item = next(item for item in menu if item["name"] == "Settings") + assert isinstance(settings_item["children"], list) + assert any(child["name"] == "Keycloak" for child in settings_item["children"]) + + +class TestDatasetClient: + def test_search_basic(self, auth_token, settings): + access_token, _ = auth_token + tenant = "demo-badgerdoc" + client = DatasetClient(settings.BASE_URL, access_token, tenant) + + result = client.search() + + assert "pagination" in result, "Response must have 'pagination'" + assert "data" in result, "Response must have 'data'" + assert isinstance(result["data"], list), "'data' must be a list" + + pagination = result["pagination"] + required_pagination_keys = { + "page_num", + "page_offset", + "page_size", + "min_pages_left", + "total", + "has_more", + } assert ( - new_refresh_token != refresh_token - ), "Old refresh token is the same as new refresh token!" 
+ required_pagination_keys <= pagination.keys() + ), f"Pagination missing keys: {required_pagination_keys - pagination.keys()}" + + for dataset in result["data"]: + required_dataset_keys = {"id", "name", "count", "created"} + assert ( + required_dataset_keys <= dataset.keys() + ), f"Dataset missing keys: {required_dataset_keys - dataset.keys()}" + assert isinstance(dataset["id"], int) + assert isinstance(dataset["name"], str) + assert isinstance(dataset["count"], int) + try: + datetime.fromisoformat(dataset["created"]) + except ValueError: + pytest.fail(f"Dataset created date is not ISO format: {dataset['created']}") + + def test_search_sorting(self, auth_token, settings): + access_token, _ = auth_token + tenant = "demo-badgerdoc" + client = DatasetClient(settings.BASE_URL, access_token, tenant) + + result = client.search(sorting=[{"direction": "desc", "field": "name"}]) + data = result["data"] + names = [d["name"] for d in data] + assert names == sorted(names, reverse=True), "Datasets are not sorted descending by name" + + def test_search_pagination(self, auth_token, settings): + access_token, _ = auth_token + tenant = "demo-badgerdoc" + client = DatasetClient(settings.BASE_URL, access_token, tenant) + + result = client.search(page_num=1, page_size=15) + assert len(result["data"]) <= 15, "Page size exceeded" + assert result["pagination"]["page_num"] == 1 From 673a7d33d429e0b5530431f97d799dafeb229fcc Mon Sep 17 00:00:00 2001 From: asobolev Date: Wed, 20 Aug 2025 17:03:29 +0200 Subject: [PATCH 04/37] added selection-deselection test --- test_automation_framework/conftest.py | 5 +++ .../helpers/datasets/dataset_client.py | 33 +++++++++++++++++++ .../tests/test_base_api.py | 29 +++++++++++++--- 3 files changed, 63 insertions(+), 4 deletions(-) diff --git a/test_automation_framework/conftest.py b/test_automation_framework/conftest.py index bfdcbfd22..02605c301 100644 --- a/test_automation_framework/conftest.py +++ b/test_automation_framework/conftest.py @@ -24,3 +24,8 
@@ def auth_token(auth_service, settings) -> tuple[str, str]: @pytest.fixture(scope="session") def settings(): return load_settings() + + +@pytest.fixture(scope="session") +def tenant(): + return "demo-badgerdoc" diff --git a/test_automation_framework/helpers/datasets/dataset_client.py b/test_automation_framework/helpers/datasets/dataset_client.py index d2ebd2957..37d896bfa 100644 --- a/test_automation_framework/helpers/datasets/dataset_client.py +++ b/test_automation_framework/helpers/datasets/dataset_client.py @@ -30,3 +30,36 @@ def search( }, ) return resp.json() + + def search_files( + self, + dataset_id: int | None = None, + page_num: int = 1, + page_size: int = 15, + ) -> dict: + """ + Fetch files. If dataset_id is provided, filter by dataset. + Otherwise, fetch all files. + """ + filters = [] + if dataset_id is not None: + filters.append({"field": "datasets.id", "operator": "eq", "value": dataset_id}) + else: + filters.append({"field": "original_name", "operator": "ilike", "value": "%%"}) + + payload = { + "pagination": {"page_num": page_num, "page_size": page_size}, + "filters": filters, + "sorting": [{"direction": "desc", "field": "last_modified"}], + } + + resp = self.post( + "/assets/files/search", + json=payload, + headers={ + "Authorization": f"Bearer {self._token}", + "X-Current-Tenant": self._tenant, + "Content-Type": "application/json", + }, + ) + return resp.json() diff --git a/test_automation_framework/tests/test_base_api.py b/test_automation_framework/tests/test_base_api.py index 6e669286e..1fe77d140 100644 --- a/test_automation_framework/tests/test_base_api.py +++ b/test_automation_framework/tests/test_base_api.py @@ -100,9 +100,8 @@ def test_search_basic(self, auth_token, settings): except ValueError: pytest.fail(f"Dataset created date is not ISO format: {dataset['created']}") - def test_search_sorting(self, auth_token, settings): + def test_search_sorting(self, auth_token, settings, tenant): access_token, _ = auth_token - tenant = 
"demo-badgerdoc" client = DatasetClient(settings.BASE_URL, access_token, tenant) result = client.search(sorting=[{"direction": "desc", "field": "name"}]) @@ -110,11 +109,33 @@ def test_search_sorting(self, auth_token, settings): names = [d["name"] for d in data] assert names == sorted(names, reverse=True), "Datasets are not sorted descending by name" - def test_search_pagination(self, auth_token, settings): + def test_search_pagination(self, auth_token, settings, tenant): access_token, _ = auth_token - tenant = "demo-badgerdoc" client = DatasetClient(settings.BASE_URL, access_token, tenant) result = client.search(page_num=1, page_size=15) assert len(result["data"]) <= 15, "Page size exceeded" assert result["pagination"]["page_num"] == 1 + + def test_selection(self, auth_token, settings, tenant): + access_token, _ = auth_token + client = DatasetClient(settings.BASE_URL, access_token, tenant) + + datasets = client.search()["data"] + assert datasets, "No datasets found" + dataset_id = datasets[0]["id"] + + files_selected = client.search_files(dataset_id=dataset_id)["data"] + assert isinstance(files_selected, list), "Files response is not a list" + + for f in files_selected: + assert any( + d["id"] == dataset_id for d in f.get("datasets", []) + ), f"File {f['original_name']} does not belong to dataset {dataset_id}" + + files_all = client.search_files()["data"] # no dataset_id + assert isinstance(files_all, list), "Files response is not a list" + + has_dataset = any(f.get("datasets") for f in files_all) + has_no_dataset = any(not f.get("datasets") for f in files_all) + assert has_dataset or has_no_dataset, "Unexpected empty file list" From 909f65f70b0f41767626093a7f97f681b4952cf0 Mon Sep 17 00:00:00 2001 From: asobolev Date: Thu, 21 Aug 2025 16:30:14 +0200 Subject: [PATCH 05/37] added datasets creation/deletion tests --- test_automation_framework/conftest.py | 31 +++++++++++ .../helpers/datasets/dataset_client.py | 34 +++++++++++++ .../tests/test_base_api.py | 51 
++++++++++++++++++- 3 files changed, 115 insertions(+), 1 deletion(-) diff --git a/test_automation_framework/conftest.py b/test_automation_framework/conftest.py index 02605c301..735fdc606 100644 --- a/test_automation_framework/conftest.py +++ b/test_automation_framework/conftest.py @@ -2,6 +2,19 @@ from settings import load_settings from helpers.auth.auth_service import AuthService from helpers.base_client.base_client import BaseClient +import logging +from helpers.datasets.dataset_client import DatasetClient +from logging import getLogger + + +logger = getLogger(__name__) + + +def pytest_configure(config): + logging.basicConfig( + level=logging.INFO, + format="%(asctime)s [%(levelname)s] %(name)s: %(message)s", + ) @pytest.fixture(scope="session") @@ -29,3 +42,21 @@ def settings(): @pytest.fixture(scope="session") def tenant(): return "demo-badgerdoc" + + +@pytest.fixture(scope="session") +def dataset_tracker(auth_token, settings, tenant): + access_token, _ = auth_token + + client = DatasetClient(settings.BASE_URL, access_token, tenant) + created = [] + + yield created, client + + # cleanup step + for name in created: + try: + resp = client.delete_dataset(name=name) + logger.info(f"[dataset_tracker] Deleted dataset {name}: {resp['detail']}") + except Exception as e: + logger.warning(f"[dataset_tracker] Failed to delete dataset {name}: {e}") diff --git a/test_automation_framework/helpers/datasets/dataset_client.py b/test_automation_framework/helpers/datasets/dataset_client.py index 37d896bfa..6eddd1b30 100644 --- a/test_automation_framework/helpers/datasets/dataset_client.py +++ b/test_automation_framework/helpers/datasets/dataset_client.py @@ -1,5 +1,9 @@ from __future__ import annotations + from helpers.base_client.base_client import BaseClient +import logging + +logger = logging.getLogger(__name__) class DatasetClient(BaseClient): @@ -63,3 +67,33 @@ def search_files( }, ) return resp.json() + + def create_dataset(self, name: str) -> dict: + payload = {"name": 
name} + resp = self.post( + "/assets/datasets", + json=payload, + headers={ + "Authorization": f"Bearer {self._token}", + "X-Current-Tenant": self._tenant, + "Content-Type": "application/json", + }, + ) + resp.raise_for_status() + logger.info(f"Created dataset {name}") + return resp.json() + + def delete_dataset(self, name: str) -> dict: + payload = {"name": name} + resp = self.delete( + "/assets/datasets", + json=payload, + headers={ + "Authorization": f"Bearer {self._token}", + "X-Current-Tenant": self._tenant, + "Content-Type": "application/json", + }, + ) + resp.raise_for_status() + logger.info(f"Deleted dataset {name}") + return resp.json() diff --git a/test_automation_framework/tests/test_base_api.py b/test_automation_framework/tests/test_base_api.py index 1fe77d140..f9fca159c 100644 --- a/test_automation_framework/tests/test_base_api.py +++ b/test_automation_framework/tests/test_base_api.py @@ -4,6 +4,7 @@ from helpers.menu.menu_client import MenuClient from helpers.datasets.dataset_client import DatasetClient from datetime import datetime +import uuid logger = getLogger(__name__) @@ -62,7 +63,7 @@ def test_menu(self, auth_token, settings): assert any(child["name"] == "Keycloak" for child in settings_item["children"]) -class TestDatasetClient: +class TestDatasets: def test_search_basic(self, auth_token, settings): access_token, _ = auth_token tenant = "demo-badgerdoc" @@ -139,3 +140,51 @@ def test_selection(self, auth_token, settings, tenant): has_dataset = any(f.get("datasets") for f in files_all) has_no_dataset = any(not f.get("datasets") for f in files_all) assert has_dataset or has_no_dataset, "Unexpected empty file list" + + def test_create_and_delete_dataset(self, auth_token, settings, tenant): + access_token, _ = auth_token + client = DatasetClient(settings.BASE_URL, access_token, tenant) + + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + create_resp = client.create_dataset(name=dataset_name) + + assert "detail" in create_resp, f"Unexpected 
response: {create_resp}" + assert "successfully created" in create_resp["detail"].lower() + + search_resp = client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}]) + datasets = search_resp["data"] + + assert any(d["name"] == dataset_name for d in datasets), f"Dataset {dataset_name} not found after creation" + + delete_resp = client.delete_dataset(name=dataset_name) + + assert "detail" in delete_resp, f"Unexpected delete response: {delete_resp}" + assert "successfully deleted" in delete_resp["detail"].lower() + + search_after = client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}]) + datasets_after = search_after["data"] + + assert all( + d["name"] != dataset_name for d in datasets_after + ), f"Dataset {dataset_name} still found after deletion!" + + @pytest.mark.skip(reason="Successfully creates dataset") + def test_create_dataset_with_empty_name(self, dataset_tracker): + created, client = dataset_tracker + + with pytest.raises(HTTPError) as e: + client.create_dataset(name="") + + assert e.value.status_code in (400, 422) + + def test_create_duplicate_dataset(self, dataset_tracker): + created, client = dataset_tracker + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + resp = client.create_dataset(name=dataset_name) + created.append(dataset_name) # register for cleanup + assert "successfully created" in resp["detail"].lower() + + with pytest.raises(HTTPError) as e: + client.create_dataset(name=dataset_name) + assert e.value.status_code == 400 + assert "already exists" in e.value.body.lower() From 3f2b2301402ab8f5dc7a41942ae742ac473d049e Mon Sep 17 00:00:00 2001 From: asobolev Date: Fri, 22 Aug 2025 14:14:56 +0200 Subject: [PATCH 06/37] added files creation/deletion tests --- test_automation_framework/conftest.py | 20 ++++++ .../helpers/files/file_client.py | 61 +++++++++++++++++++ .../tests/test_base_api.py | 54 ++++++++++++++++ 3 files changed, 135 insertions(+) create mode 100644 
test_automation_framework/helpers/files/file_client.py diff --git a/test_automation_framework/conftest.py b/test_automation_framework/conftest.py index 735fdc606..60dea11ab 100644 --- a/test_automation_framework/conftest.py +++ b/test_automation_framework/conftest.py @@ -5,6 +5,7 @@ import logging from helpers.datasets.dataset_client import DatasetClient from logging import getLogger +from helpers.files.file_client import FileClient logger = getLogger(__name__) @@ -60,3 +61,22 @@ def dataset_tracker(auth_token, settings, tenant): logger.info(f"[dataset_tracker] Deleted dataset {name}: {resp['detail']}") except Exception as e: logger.warning(f"[dataset_tracker] Failed to delete dataset {name}: {e}") + + +@pytest.fixture +def file_tracker(auth_token, settings, tenant): + """Tracks uploaded files and deletes them after the test session.""" + access_token, _ = auth_token + client = FileClient(settings.BASE_URL, access_token, tenant) + + created_files = [] + + yield created_files, client + + if created_files: + ids = [f["id"] for f in created_files] + try: + result = client.delete_files(ids) + logger.info(f"Deleted files: {ids}, response={result}") + except Exception as e: + logger.error(f"Failed to cleanup files {ids}: {e}") diff --git a/test_automation_framework/helpers/files/file_client.py b/test_automation_framework/helpers/files/file_client.py new file mode 100644 index 000000000..1ce9a65d4 --- /dev/null +++ b/test_automation_framework/helpers/files/file_client.py @@ -0,0 +1,61 @@ +from __future__ import annotations +from helpers.base_client.base_client import BaseClient +import logging + +logger = logging.getLogger(__name__) + + +class FileClient(BaseClient): + def __init__(self, base_url: str, token: str, tenant: str) -> None: + super().__init__(base_url) + self._token = token + self._tenant = tenant + + def upload_file(self, file_path: str) -> dict: + with open(file_path, "rb") as f: + files = {"files": (file_path.split("/")[-1], f, "application/pdf")} + resp = 
self.post( + "/assets/files", + files=files, + headers={ + "Authorization": f"Bearer {self._token}", + "X-Current-Tenant": self._tenant, + }, + ) + logger.info(f"Uploaded file {file_path}") + return resp.json() + + def delete_files(self, ids: list[int]) -> dict: + resp = self.delete( + "/assets/files", + json={"objects": ids}, + headers={ + "Authorization": f"Bearer {self._token}", + "X-Current-Tenant": self._tenant, + "Content-Type": "application/json", + }, + ) + logger.info(f"Deleted file {ids}") + return resp.json() + + def search_files( + self, + page_num: int = 1, + page_size: int = 15, + filters: list[dict] | None = None, + ) -> dict: + payload = { + "pagination": {"page_num": page_num, "page_size": page_size}, + "filters": filters or [{"field": "original_name", "operator": "ilike", "value": "%%"}], + "sorting": [{"direction": "desc", "field": "last_modified"}], + } + resp = self.post( + "/assets/files/search", + json=payload, + headers={ + "Authorization": f"Bearer {self._token}", + "X-Current-Tenant": self._tenant, + "Content-Type": "application/json", + }, + ) + return resp.json() diff --git a/test_automation_framework/tests/test_base_api.py b/test_automation_framework/tests/test_base_api.py index f9fca159c..152774fd1 100644 --- a/test_automation_framework/tests/test_base_api.py +++ b/test_automation_framework/tests/test_base_api.py @@ -5,6 +5,8 @@ from helpers.datasets.dataset_client import DatasetClient from datetime import datetime import uuid +import shutil +from pathlib import Path logger = getLogger(__name__) @@ -188,3 +190,55 @@ def test_create_duplicate_dataset(self, dataset_tracker): client.create_dataset(name=dataset_name) assert e.value.status_code == 400 assert "already exists" in e.value.body.lower() + + +class TestFiles: + def test_upload_and_delete_file(self, file_tracker, tmp_path): + created_files, client = file_tracker + + data_dir = Path(__file__).parent.parent / "data" + original_file = data_dir / "multivitamin.pdf" + + unique_name = 
f"{uuid.uuid4().hex}_multivitamin.pdf" + temp_file = tmp_path / unique_name + shutil.copy(original_file, temp_file) + + try: + result = client.upload_file(str(temp_file)) + assert isinstance(result, list) + file_info = result[0] + assert file_info["status"] is True + assert "id" in file_info + assert "file_name" in file_info + + created_files.append(file_info) + + search = client.search_files() + ids = [f["id"] for f in search["data"]] + assert file_info["id"] in ids, "Uploaded file not found in search" + + delete_result = client.delete_files([file_info["id"]]) + assert delete_result[0]["status"] is True + assert delete_result[0]["action"] == "delete" + + search_after = client.search_files() + ids_after = [f["id"] for f in search_after["data"]] + assert file_info["id"] not in ids_after, "File was not deleted properly" + + created_files.clear() + + finally: + if temp_file.exists(): + temp_file.unlink() + + @pytest.mark.parametrize("content", ["", " "]) + def test_upload_empty_file(self, file_tracker, tmp_path, content): + _, client = file_tracker + + empty_file = tmp_path / f"{uuid.uuid4().hex}_empty.pdf" + empty_file.write_text(content) + + with pytest.raises(HTTPError) as e: + client.upload_file(str(empty_file)) + assert e.value.status_code == 500 + assert "Internal Server Error" in e.value.body From 22575f63f920085b273fd40964964e872659163e Mon Sep 17 00:00:00 2001 From: asobolev Date: Fri, 22 Aug 2025 17:25:46 +0200 Subject: [PATCH 07/37] added files moving test --- .../helpers/files/file_client.py | 15 +++++ .../tests/test_base_api.py | 64 +++++++++++++++++-- 2 files changed, 75 insertions(+), 4 deletions(-) diff --git a/test_automation_framework/helpers/files/file_client.py b/test_automation_framework/helpers/files/file_client.py index 1ce9a65d4..3083fb72c 100644 --- a/test_automation_framework/helpers/files/file_client.py +++ b/test_automation_framework/helpers/files/file_client.py @@ -59,3 +59,18 @@ def search_files( }, ) return resp.json() + + def 
move_files(self, name: str, objects: list) -> dict: + payload = {"name": name, "objects": objects} + resp = self.post( + "/assets/datasets/bonds", + json=payload, + headers={ + "Authorization": f"Bearer {self._token}", + "X-Current-Tenant": self._tenant, + "Content-Type": "application/json", + }, + ) + resp.raise_for_status() + logger.info(f"Moved object {objects} to the dataset {name}") + return resp.json() diff --git a/test_automation_framework/tests/test_base_api.py b/test_automation_framework/tests/test_base_api.py index 152774fd1..b55758084 100644 --- a/test_automation_framework/tests/test_base_api.py +++ b/test_automation_framework/tests/test_base_api.py @@ -66,9 +66,8 @@ def test_menu(self, auth_token, settings): class TestDatasets: - def test_search_basic(self, auth_token, settings): + def test_search_basic(self, auth_token, settings, tenant): access_token, _ = auth_token - tenant = "demo-badgerdoc" client = DatasetClient(settings.BASE_URL, access_token, tenant) result = client.search() @@ -136,7 +135,7 @@ def test_selection(self, auth_token, settings, tenant): d["id"] == dataset_id for d in f.get("datasets", []) ), f"File {f['original_name']} does not belong to dataset {dataset_id}" - files_all = client.search_files()["data"] # no dataset_id + files_all = client.search_files()["data"] assert isinstance(files_all, list), "Files response is not a list" has_dataset = any(f.get("datasets") for f in files_all) @@ -183,7 +182,7 @@ def test_create_duplicate_dataset(self, dataset_tracker): created, client = dataset_tracker dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" resp = client.create_dataset(name=dataset_name) - created.append(dataset_name) # register for cleanup + created.append(dataset_name) assert "successfully created" in resp["detail"].lower() with pytest.raises(HTTPError) as e: @@ -232,6 +231,7 @@ def test_upload_and_delete_file(self, file_tracker, tmp_path): temp_file.unlink() @pytest.mark.parametrize("content", ["", " "]) + 
@pytest.mark.skip(reason="Uploads a file, but returns 500") def test_upload_empty_file(self, file_tracker, tmp_path, content): _, client = file_tracker @@ -242,3 +242,59 @@ def test_upload_empty_file(self, file_tracker, tmp_path, content): client.upload_file(str(empty_file)) assert e.value.status_code == 500 assert "Internal Server Error" in e.value.body + + def test_move_file(self, file_tracker, dataset_tracker, tmp_path): + created_datasets, dataset_client = dataset_tracker + first_dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + second_dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + + first_resp = dataset_client.create_dataset(name=first_dataset_name) + created_datasets.append(first_dataset_name) + assert "successfully created" in first_resp["detail"].lower() + datasets = dataset_client.search(filters=[{"field": "name", "operator": "eq", "value": first_dataset_name}])[ + "data" + ] + assert len(datasets) == 1, f"Expected 1 dataset, got {len(datasets)}" + first_dataset_id = datasets[0]["id"] + + second_resp = dataset_client.create_dataset(name=second_dataset_name) + created_datasets.append(second_dataset_name) + datasets = dataset_client.search(filters=[{"field": "name", "operator": "eq", "value": second_dataset_name}])[ + "data" + ] + assert len(datasets) == 1, f"Expected 1 dataset, got {len(datasets)}" + second_dataset_id = datasets[0]["id"] + assert "successfully created" in second_resp["detail"].lower() + + created_files, file_client = file_tracker + + data_dir = Path(__file__).parent.parent / "data" + original_file = data_dir / "multivitamin.pdf" + unique_name = f"{uuid.uuid4().hex}_multivitamin.pdf" + temp_file = tmp_path / unique_name + shutil.copy(original_file, temp_file) + + try: + result = file_client.upload_file(str(temp_file)) + file_info = result[0] + assert file_info["status"] is True + created_files.append(file_info) + file_id = file_info["id"] + + move1 = file_client.move_files(name=first_dataset_name, objects=[file_id])[0] + assert 
move1["status"] is True + assert "successfully bounded" in move1["message"].lower() + + files_in_first = dataset_client.search_files(dataset_id=first_dataset_id)["data"] + assert any(f["id"] == file_id for f in files_in_first), "File not found in first dataset after move" + + move2 = file_client.move_files(name=second_dataset_name, objects=[file_id])[0] + assert move2["status"] is True + assert "successfully bounded" in move2["message"].lower() + + files_in_second = dataset_client.search_files(dataset_id=second_dataset_id)["data"] + assert any(f["id"] == file_id for f in files_in_second), "File not found in second dataset after move" + + finally: + if temp_file.exists(): + temp_file.unlink() From 8a9a2360470d766bf21c58d2efc8ad5ff18bd5f1 Mon Sep 17 00:00:00 2001 From: asobolev Date: Mon, 25 Aug 2025 17:47:50 +0200 Subject: [PATCH 08/37] refactoring and jobs creation test --- test_automation_framework/conftest.py | 104 ++++--- .../helpers/auth/auth_service.py | 8 +- .../helpers/base_client/base_client.py | 41 ++- .../helpers/datasets/dataset_client.py | 56 +--- .../helpers/files/file_client.py | 50 +--- .../helpers/jobs/jobs_client.py | 106 +++++++ .../helpers/menu/menu_client.py | 13 +- .../tests/test_base_api.py | 263 +++++++----------- 8 files changed, 345 insertions(+), 296 deletions(-) create mode 100644 test_automation_framework/helpers/jobs/jobs_client.py diff --git a/test_automation_framework/conftest.py b/test_automation_framework/conftest.py index 60dea11ab..53bf911d1 100644 --- a/test_automation_framework/conftest.py +++ b/test_automation_framework/conftest.py @@ -1,25 +1,36 @@ +import logging +from logging import getLogger +from typing import Tuple + import pytest + from settings import load_settings from helpers.auth.auth_service import AuthService from helpers.base_client.base_client import BaseClient -import logging from helpers.datasets.dataset_client import DatasetClient -from logging import getLogger from helpers.files.file_client import FileClient 
- +from helpers.jobs.jobs_client import JobsClient +from helpers.menu.menu_client import MenuClient logger = getLogger(__name__) -def pytest_configure(config): - logging.basicConfig( - level=logging.INFO, - format="%(asctime)s [%(levelname)s] %(name)s: %(message)s", - ) +def pytest_configure(): + logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(name)s: %(message)s") @pytest.fixture(scope="session") -def base_client(settings): +def settings(): + return load_settings() + + +@pytest.fixture(scope="session") +def tenant(settings) -> str: + return getattr(settings, "TENANT", "demo-badgerdoc") + + +@pytest.fixture(scope="session") +def base_client(settings) -> BaseClient: client = BaseClient(settings.BASE_URL, timeout=10) yield client client.close() @@ -31,52 +42,71 @@ def auth_service(base_client) -> AuthService: @pytest.fixture(scope="session") -def auth_token(auth_service, settings) -> tuple[str, str]: +def auth_token(auth_service, settings) -> Tuple[str, str]: return auth_service.get_token(settings.API_USER, settings.API_PASS.get_secret_value()) -@pytest.fixture(scope="session") -def settings(): - return load_settings() +@pytest.fixture +def access_token(auth_token) -> str: + return auth_token[0] -@pytest.fixture(scope="session") -def tenant(): - return "demo-badgerdoc" +@pytest.fixture +def menu_client(settings, access_token, tenant) -> MenuClient: + return MenuClient(settings.BASE_URL, access_token, tenant) -@pytest.fixture(scope="session") -def dataset_tracker(auth_token, settings, tenant): - access_token, _ = auth_token +@pytest.fixture +def dataset_client(settings, access_token, tenant) -> DatasetClient: + return DatasetClient(settings.BASE_URL, access_token, tenant) + + +@pytest.fixture +def file_client(settings, access_token, tenant) -> FileClient: + return FileClient(settings.BASE_URL, access_token, tenant) + - client = DatasetClient(settings.BASE_URL, access_token, tenant) - created = [] +@pytest.fixture +def 
jobs_client(settings, access_token, tenant) -> JobsClient: + return JobsClient(settings.BASE_URL, access_token, tenant) - yield created, client - # cleanup step +@pytest.fixture +def dataset_tracker(dataset_client): + created: list[str] = [] + yield created, dataset_client for name in created: try: - resp = client.delete_dataset(name=name) - logger.info(f"[dataset_tracker] Deleted dataset {name}: {resp['detail']}") + resp = dataset_client.delete_dataset(name=name) + logger.info(f"[dataset_tracker] Deleted dataset {name}: {resp.get('detail')}") except Exception as e: logger.warning(f"[dataset_tracker] Failed to delete dataset {name}: {e}") @pytest.fixture -def file_tracker(auth_token, settings, tenant): - """Tracks uploaded files and deletes them after the test session.""" - access_token, _ = auth_token - client = FileClient(settings.BASE_URL, access_token, tenant) - - created_files = [] +def file_tracker(file_client): + created_files: list[dict] = [] + yield created_files, file_client + if created_files: + ids = [f["id"] for f in created_files if f.get("id") is not None] + if ids: + try: + result = file_client.delete_files(ids) + logger.info(f"[file_tracker] Deleted files: {ids}, response={result}") + except Exception as e: + logger.warning(f"[file_tracker] Failed to cleanup files {ids}: {e}") - yield created_files, client - if created_files: - ids = [f["id"] for f in created_files] +@pytest.fixture +def job_tracker(jobs_client): + created: list[dict] = [] + yield created, jobs_client + for job in created: + job_id = job.get("id") or job.get("job_id") or (job.get("job") or {}).get("id") + if not job_id: + continue try: - result = client.delete_files(ids) - logger.info(f"Deleted files: {ids}, response={result}") + jobs_client.post("/jobs/jobs/cancel", json={"id": job_id}, headers=jobs_client._default_headers()) + logger.info(f"[job_tracker] Cancelled job {job_id}") except Exception as e: - logger.error(f"Failed to cleanup files {ids}: {e}") + 
logger.warning(f"[job_tracker] Could not cancel job {job_id}: {e}") diff --git a/test_automation_framework/helpers/auth/auth_service.py b/test_automation_framework/helpers/auth/auth_service.py index 5c5ffecbc..89753c18c 100644 --- a/test_automation_framework/helpers/auth/auth_service.py +++ b/test_automation_framework/helpers/auth/auth_service.py @@ -20,7 +20,7 @@ def __init__(self, client: BaseClient) -> None: self.client = client def get_token(self, username: str, password: str, client_id: str = "admin-cli") -> tuple[str, str]: - resp = self.client.post( + resp = self.client.post_json( "/users/token", data={ "grant_type": "password", @@ -30,11 +30,11 @@ def get_token(self, username: str, password: str, client_id: str = "admin-cli") }, headers={"Content-Type": "application/x-www-form-urlencoded"}, ) - result = TokenResponse.model_validate(resp.json()) + result = TokenResponse.model_validate(resp) return result.access_token, result.refresh_token def refresh_token(self, refresh_token: str, client_id: str = "admin-cli") -> tuple[str, str]: - resp = self.client.post( + resp = self.client.post_json( "/users/refresh_token", json={ "grant_type": "refresh_token", @@ -42,5 +42,5 @@ def refresh_token(self, refresh_token: str, client_id: str = "admin-cli") -> tup "refresh_token": refresh_token, }, ) - result = TokenResponse.model_validate(resp.json()) + result = TokenResponse.model_validate(resp) return result.access_token, result.refresh_token diff --git a/test_automation_framework/helpers/base_client/base_client.py b/test_automation_framework/helpers/base_client/base_client.py index 06b23bcd2..69a476f35 100644 --- a/test_automation_framework/helpers/base_client/base_client.py +++ b/test_automation_framework/helpers/base_client/base_client.py @@ -20,16 +20,39 @@ def __init__( class BaseClient: - def __init__(self, base_url: str, timeout: int = 30) -> None: + def __init__( + self, base_url: str, timeout: int = 30, token: Optional[str] = None, tenant: Optional[str] = None + ) 
-> None: self.base_url = base_url.rstrip("/") self.timeout = timeout + self._token = token + self._tenant = tenant self._client = httpx.Client(base_url=self.base_url, timeout=self.timeout) - def _request(self, method: str, path: str, **kwargs: Any) -> httpx.Response: + def set_token(self, token: str | None) -> None: + self._token = token + + def set_tenant(self, tenant: str | None) -> None: + self._tenant = tenant + + def _default_headers(self, content_type_json: bool = False, extra: dict[str, str] | None = None) -> dict[str, str]: + headers: dict[str, str] = {} + if self._token: + headers["Authorization"] = f"Bearer {self._token}" + if self._tenant: + headers["X-Current-Tenant"] = self._tenant + if content_type_json: + headers["Content-Type"] = "application/json" + if extra: + headers.update(extra) + return headers + + def _request(self, method: str, path: str, headers: dict | None = None, **kwargs: Any) -> httpx.Response: rel_path = path if path.startswith("/") else "/" + path start = time.perf_counter() + merged_headers = {**self._default_headers(), **(headers or {})} try: - resp = self._client.request(method, rel_path, **kwargs) + resp = self._client.request(method, rel_path, headers=merged_headers, **kwargs) resp.raise_for_status() logger.debug( f"HTTP {method} {self.base_url}{rel_path} -> {resp.status_code} in {time.perf_counter() - start:.3f}s" @@ -61,6 +84,18 @@ def put(self, path: str, **kwargs: Any) -> httpx.Response: def delete(self, path: str, **kwargs: Any) -> httpx.Response: return self._request("DELETE", path, **kwargs) + def get_json(self, path: str, headers: dict | None = None, **kwargs: Any) -> Any: + return self._request("GET", path, headers=headers, **kwargs).json() + + def post_json(self, path: str, headers: dict | None = None, **kwargs: Any) -> Any: + return self._request("POST", path, headers=headers, **kwargs).json() + + def put_json(self, path: str, headers: dict | None = None, **kwargs: Any) -> Any: + return self._request("PUT", path, 
headers=headers, **kwargs).json() + + def delete_json(self, path: str, headers: dict | None = None, **kwargs: Any) -> Any: + return self._request("DELETE", path, headers=headers, **kwargs).json() + def close(self) -> None: self._client.close() diff --git a/test_automation_framework/helpers/datasets/dataset_client.py b/test_automation_framework/helpers/datasets/dataset_client.py index 6eddd1b30..653213a6e 100644 --- a/test_automation_framework/helpers/datasets/dataset_client.py +++ b/test_automation_framework/helpers/datasets/dataset_client.py @@ -8,9 +8,7 @@ class DatasetClient(BaseClient): def __init__(self, base_url: str, token: str, tenant: str) -> None: - super().__init__(base_url) - self._token = token - self._tenant = tenant + super().__init__(base_url, token=token, tenant=tenant) def search( self, @@ -24,16 +22,9 @@ def search( "filters": filters or [], "sorting": sorting or [{"direction": "asc", "field": "name"}], } - resp = self.post( - "/assets/datasets/search", - json=payload, - headers={ - "Authorization": f"Bearer {self._token}", - "X-Current-Tenant": self._tenant, - "Content-Type": "application/json", - }, + return self.post_json( + "/assets/datasets/search", json=payload, headers=self._default_headers(content_type_json=True) ) - return resp.json() def search_files( self, @@ -41,10 +32,6 @@ def search_files( page_num: int = 1, page_size: int = 15, ) -> dict: - """ - Fetch files. If dataset_id is provided, filter by dataset. - Otherwise, fetch all files. 
- """ filters = [] if dataset_id is not None: filters.append({"field": "datasets.id", "operator": "eq", "value": dataset_id}) @@ -57,43 +44,18 @@ def search_files( "sorting": [{"direction": "desc", "field": "last_modified"}], } - resp = self.post( - "/assets/files/search", - json=payload, - headers={ - "Authorization": f"Bearer {self._token}", - "X-Current-Tenant": self._tenant, - "Content-Type": "application/json", - }, + return self.post_json( + "/assets/files/search", json=payload, headers=self._default_headers(content_type_json=True) ) - return resp.json() def create_dataset(self, name: str) -> dict: payload = {"name": name} - resp = self.post( - "/assets/datasets", - json=payload, - headers={ - "Authorization": f"Bearer {self._token}", - "X-Current-Tenant": self._tenant, - "Content-Type": "application/json", - }, - ) - resp.raise_for_status() + resp = self.post_json("/assets/datasets", json=payload, headers=self._default_headers(content_type_json=True)) logger.info(f"Created dataset {name}") - return resp.json() + return resp def delete_dataset(self, name: str) -> dict: payload = {"name": name} - resp = self.delete( - "/assets/datasets", - json=payload, - headers={ - "Authorization": f"Bearer {self._token}", - "X-Current-Tenant": self._tenant, - "Content-Type": "application/json", - }, - ) - resp.raise_for_status() + resp = self.delete_json("/assets/datasets", json=payload, headers=self._default_headers(content_type_json=True)) logger.info(f"Deleted dataset {name}") - return resp.json() + return resp diff --git a/test_automation_framework/helpers/files/file_client.py b/test_automation_framework/helpers/files/file_client.py index 3083fb72c..5a79e9e58 100644 --- a/test_automation_framework/helpers/files/file_client.py +++ b/test_automation_framework/helpers/files/file_client.py @@ -1,42 +1,30 @@ from __future__ import annotations from helpers.base_client.base_client import BaseClient import logging +from typing import List logger = logging.getLogger(__name__) 
class FileClient(BaseClient): def __init__(self, base_url: str, token: str, tenant: str) -> None: - super().__init__(base_url) - self._token = token - self._tenant = tenant + super().__init__(base_url, token=token, tenant=tenant) def upload_file(self, file_path: str) -> dict: with open(file_path, "rb") as f: files = {"files": (file_path.split("/")[-1], f, "application/pdf")} - resp = self.post( - "/assets/files", - files=files, - headers={ - "Authorization": f"Bearer {self._token}", - "X-Current-Tenant": self._tenant, - }, - ) + resp = self.post("/assets/files", files=files, headers=self._default_headers()) logger.info(f"Uploaded file {file_path}") return resp.json() - def delete_files(self, ids: list[int]) -> dict: - resp = self.delete( + def delete_files(self, ids: List[int]) -> dict: + resp = self.delete_json( "/assets/files", json={"objects": ids}, - headers={ - "Authorization": f"Bearer {self._token}", - "X-Current-Tenant": self._tenant, - "Content-Type": "application/json", - }, + headers=self._default_headers(content_type_json=True), ) logger.info(f"Deleted file {ids}") - return resp.json() + return resp def search_files( self, @@ -49,28 +37,14 @@ def search_files( "filters": filters or [{"field": "original_name", "operator": "ilike", "value": "%%"}], "sorting": [{"direction": "desc", "field": "last_modified"}], } - resp = self.post( - "/assets/files/search", - json=payload, - headers={ - "Authorization": f"Bearer {self._token}", - "X-Current-Tenant": self._tenant, - "Content-Type": "application/json", - }, + return self.post_json( + "/assets/files/search", json=payload, headers=self._default_headers(content_type_json=True) ) - return resp.json() def move_files(self, name: str, objects: list) -> dict: payload = {"name": name, "objects": objects} - resp = self.post( - "/assets/datasets/bonds", - json=payload, - headers={ - "Authorization": f"Bearer {self._token}", - "X-Current-Tenant": self._tenant, - "Content-Type": "application/json", - }, + resp = 
self.post_json( + "/assets/datasets/bonds", json=payload, headers=self._default_headers(content_type_json=True) ) - resp.raise_for_status() logger.info(f"Moved object {objects} to the dataset {name}") - return resp.json() + return resp diff --git a/test_automation_framework/helpers/jobs/jobs_client.py b/test_automation_framework/helpers/jobs/jobs_client.py new file mode 100644 index 000000000..f58623ccf --- /dev/null +++ b/test_automation_framework/helpers/jobs/jobs_client.py @@ -0,0 +1,106 @@ +from __future__ import annotations +from typing import Any, Dict, List +import time +import logging + +from helpers.base_client.base_client import BaseClient + +logger = logging.getLogger(__name__) + + +class JobsClient(BaseClient): + def __init__(self, base_url: str, token: str, tenant: str) -> None: + super().__init__(base_url, token=token, tenant=tenant) + + def get_supported_pipelines(self) -> List[Dict[str, Any]]: + return self.get_json("/jobs/pipelines/support", headers=self._default_headers()) + + def get_pipeline(self, engine_resource: str) -> Dict[str, Any]: + return self.get_json(f"/jobs/pipelines/{engine_resource}", headers=self._default_headers()) + + def create_job( + self, + name: str, + file_ids: list[int], + pipeline_id: str, + pipeline_engine: str, + owners: list[str], + datasets: list[int] | None = None, + categories: list[str] | None = None, + annotators: list[str] | None = None, + validators: list[str] | None = None, + previous_jobs: list[int] | None = None, + revisions: list[int] | None = None, + is_draft: bool = False, + is_auto_distribution: bool = False, + start_manual_job_automatically: bool = False, + job_type: str = "ExtractionJob", + pipeline_name: str | None = None, + ): + payload = { + "name": name, + "revisions": revisions or [], + "datasets": datasets or [], + "files": file_ids, + "previous_jobs": previous_jobs or [], + "type": job_type, + "is_draft": is_draft, + "is_auto_distribution": is_auto_distribution, + "start_manual_job_automatically": 
start_manual_job_automatically, + "categories": categories or [], + "owners": owners or [], + "annotators": annotators or [], + "validators": validators or [], + "pipeline_name": pipeline_name or pipeline_id, + "pipeline_id": pipeline_id, + "pipeline_engine": pipeline_engine, + } + + return self.post_json( + "/jobs/jobs/create_job", json=payload, headers=self._default_headers(content_type_json=True) + ) + + def get_job(self, job_id: int) -> Dict[str, Any]: + return self.get_json(f"/jobs/jobs/{job_id}", headers=self._default_headers()) + + def get_progress(self, job_id: int) -> Dict[str, Any]: + return self.post_json( + "/jobs/jobs/progress", json={"ids": [job_id]}, headers=self._default_headers(content_type_json=True) + ) + + def poll_until_finished( + self, + job_id: int, + timeout_seconds: int = 120, + interval_seconds: float = 1.0, + backoff_factor: float = 1.5, + ) -> Dict[str, Any]: + start = time.monotonic() + current_interval = interval_seconds + + logger.info(f"Polling job {job_id} until finished (timeout {timeout_seconds}s)") + while True: + elapsed = time.monotonic() - start + if elapsed > timeout_seconds: + raise TimeoutError(f"Job {job_id} not finished after {timeout_seconds}s") + job_obj = self.get_job(job_id) + status = job_obj.get("status") or job_obj.get("data", {}).get("status") + logger.info(f"Polled job {job_id} status: {status}") + + if status and str(status).lower() in {"finished", "success", "completed"}: + logger.info(f"Job {job_id} finished with status={status}") + return job_obj + try: + progress = self.get_progress(job_id) + if isinstance(progress, dict): + for k, v in progress.items(): + if str(k) == str(job_id) and isinstance(v, dict): + fin = v.get("finished") + tot = v.get("total") + if fin is not None and tot is not None and fin >= tot: + logger.info("Progress shows job finished (finished>=total)") + return self.get_job(job_id) + except Exception: + logger.debug(f"Progress probe failed for job {job_id}; will retry") + 
time.sleep(current_interval) + current_interval = min(current_interval * backoff_factor, 10.0) diff --git a/test_automation_framework/helpers/menu/menu_client.py b/test_automation_framework/helpers/menu/menu_client.py index 7861b2206..9081dbc88 100644 --- a/test_automation_framework/helpers/menu/menu_client.py +++ b/test_automation_framework/helpers/menu/menu_client.py @@ -4,16 +4,7 @@ class MenuClient(BaseClient): def __init__(self, base_url: str, token: str, tenant: str) -> None: - super().__init__(base_url) - self._token = token - self._tenant = tenant + super().__init__(base_url, token=token, tenant=tenant) def get_menu(self) -> list[dict]: - resp = self.get( - "/core/menu", - headers={ - "Authorization": f"Bearer {self._token}", - "X-Current-Tenant": self._tenant, - }, - ) - return resp.json() + return self.get_json("/core/menu", headers=self._default_headers()) diff --git a/test_automation_framework/tests/test_base_api.py b/test_automation_framework/tests/test_base_api.py index b55758084..b4a938d3b 100644 --- a/test_automation_framework/tests/test_base_api.py +++ b/test_automation_framework/tests/test_base_api.py @@ -1,12 +1,12 @@ from logging import getLogger -import pytest -from helpers.base_client.base_client import HTTPError -from helpers.menu.menu_client import MenuClient -from helpers.datasets.dataset_client import DatasetClient from datetime import datetime +from pathlib import Path import uuid import shutil -from pathlib import Path + +import pytest + +from helpers.base_client.base_client import HTTPError logger = getLogger(__name__) @@ -14,160 +14,95 @@ class TestAuthAPI: def test_basic_auth(self, auth_token): access_token, refresh_token = auth_token - assert access_token, "No access_token found!" - assert refresh_token, "No refresh_token found!" 
+ assert access_token + assert refresh_token def test_wrong_creds(self, auth_service): - with pytest.raises(HTTPError) as e: + with pytest.raises(HTTPError) as exc: auth_service.get_token("wrong", "wrong") - assert e.value.status_code == 401, f"Expected 401 but got {e.value.status_code}: {e.value.body}" + assert exc.value.status_code == 401 def test_refresh_token(self, auth_token, auth_service): access_token, refresh_token = auth_token - new_access_token, new_refresh_token = auth_service.refresh_token(refresh_token=refresh_token) - assert new_access_token != access_token, "Old access token is the same as new access token!" - assert new_refresh_token != refresh_token, "Old refresh token is the same as new refresh token!" + new_access, new_refresh = auth_service.refresh_token(refresh_token=refresh_token) + assert new_access != access_token + assert new_refresh != refresh_token class TestAPI: - def test_menu(self, auth_token, settings): - access_token, _ = auth_token - tenant = "demo-badgerdoc" - menu_client = MenuClient(settings.BASE_URL, access_token, tenant) + def test_menu(self, menu_client): menu = menu_client.get_menu() - - assert isinstance(menu, list), "Menu is expected to be a list!" - assert len(menu), "Menu should have been returned!" 
- - required_keys = { - "name", - "badgerdoc_path", - "is_external", - "is_iframe", - "url", - "children", - } + assert isinstance(menu, list) + assert menu + required_keys = {"name", "badgerdoc_path", "is_external", "is_iframe", "url", "children"} for item in menu: - assert required_keys <= item.keys(), f"Menu item missing keys: {item}" - + assert required_keys <= item.keys() first_item = menu[0] assert isinstance(first_item["name"], str) assert isinstance(first_item["badgerdoc_path"], str) assert isinstance(first_item["is_external"], bool) assert isinstance(first_item["children"], (list, type(None))) - expected_names = {"Documents", "My Tasks", "Jobs", "Settings"} actual_names = {item["name"] for item in menu} - assert expected_names <= actual_names, f"Missing expected menu items: {expected_names - actual_names}" - - settings_item = next(item for item in menu if item["name"] == "Settings") + assert expected_names <= actual_names + settings_item = next(i for i in menu if i["name"] == "Settings") assert isinstance(settings_item["children"], list) assert any(child["name"] == "Keycloak" for child in settings_item["children"]) class TestDatasets: - def test_search_basic(self, auth_token, settings, tenant): - access_token, _ = auth_token - client = DatasetClient(settings.BASE_URL, access_token, tenant) - - result = client.search() - - assert "pagination" in result, "Response must have 'pagination'" - assert "data" in result, "Response must have 'data'" - assert isinstance(result["data"], list), "'data' must be a list" - + def test_search_basic(self, dataset_client): + result = dataset_client.search() + assert "pagination" in result + assert "data" in result + assert isinstance(result["data"], list) pagination = result["pagination"] - required_pagination_keys = { - "page_num", - "page_offset", - "page_size", - "min_pages_left", - "total", - "has_more", - } - assert ( - required_pagination_keys <= pagination.keys() - ), f"Pagination missing keys: 
{required_pagination_keys - pagination.keys()}" - + required_pagination_keys = {"page_num", "page_offset", "page_size", "min_pages_left", "total", "has_more"} + assert required_pagination_keys <= pagination.keys() for dataset in result["data"]: required_dataset_keys = {"id", "name", "count", "created"} - assert ( - required_dataset_keys <= dataset.keys() - ), f"Dataset missing keys: {required_dataset_keys - dataset.keys()}" + assert required_dataset_keys <= dataset.keys() assert isinstance(dataset["id"], int) assert isinstance(dataset["name"], str) assert isinstance(dataset["count"], int) - try: - datetime.fromisoformat(dataset["created"]) - except ValueError: - pytest.fail(f"Dataset created date is not ISO format: {dataset['created']}") - - def test_search_sorting(self, auth_token, settings, tenant): - access_token, _ = auth_token - client = DatasetClient(settings.BASE_URL, access_token, tenant) - - result = client.search(sorting=[{"direction": "desc", "field": "name"}]) - data = result["data"] - names = [d["name"] for d in data] - assert names == sorted(names, reverse=True), "Datasets are not sorted descending by name" + datetime.fromisoformat(dataset["created"]) - def test_search_pagination(self, auth_token, settings, tenant): - access_token, _ = auth_token - client = DatasetClient(settings.BASE_URL, access_token, tenant) + def test_search_sorting(self, dataset_client): + result = dataset_client.search(sorting=[{"direction": "desc", "field": "name"}]) + names = [d["name"] for d in result["data"]] + assert names == sorted(names, reverse=True) - result = client.search(page_num=1, page_size=15) - assert len(result["data"]) <= 15, "Page size exceeded" + def test_search_pagination(self, dataset_client): + result = dataset_client.search(page_num=1, page_size=15) + assert len(result["data"]) <= 15 assert result["pagination"]["page_num"] == 1 - def test_selection(self, auth_token, settings, tenant): - access_token, _ = auth_token - client = 
DatasetClient(settings.BASE_URL, access_token, tenant) - - datasets = client.search()["data"] - assert datasets, "No datasets found" + def test_selection(self, dataset_client): + datasets = dataset_client.search()["data"] + assert datasets dataset_id = datasets[0]["id"] - - files_selected = client.search_files(dataset_id=dataset_id)["data"] - assert isinstance(files_selected, list), "Files response is not a list" - + files_selected = dataset_client.search_files(dataset_id=dataset_id)["data"] + assert isinstance(files_selected, list) for f in files_selected: - assert any( - d["id"] == dataset_id for d in f.get("datasets", []) - ), f"File {f['original_name']} does not belong to dataset {dataset_id}" - - files_all = client.search_files()["data"] - assert isinstance(files_all, list), "Files response is not a list" - + assert any(d["id"] == dataset_id for d in f.get("datasets", [])) + files_all = dataset_client.search_files()["data"] + assert isinstance(files_all, list) has_dataset = any(f.get("datasets") for f in files_all) has_no_dataset = any(not f.get("datasets") for f in files_all) - assert has_dataset or has_no_dataset, "Unexpected empty file list" - - def test_create_and_delete_dataset(self, auth_token, settings, tenant): - access_token, _ = auth_token - client = DatasetClient(settings.BASE_URL, access_token, tenant) + assert has_dataset or has_no_dataset + def test_create_and_delete_dataset(self, dataset_client): dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" - create_resp = client.create_dataset(name=dataset_name) - - assert "detail" in create_resp, f"Unexpected response: {create_resp}" + create_resp = dataset_client.create_dataset(name=dataset_name) + assert "detail" in create_resp assert "successfully created" in create_resp["detail"].lower() - - search_resp = client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}]) - datasets = search_resp["data"] - - assert any(d["name"] == dataset_name for d in datasets), f"Dataset 
{dataset_name} not found after creation" - - delete_resp = client.delete_dataset(name=dataset_name) - - assert "detail" in delete_resp, f"Unexpected delete response: {delete_resp}" + search_resp = dataset_client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}]) + assert any(d["name"] == dataset_name for d in search_resp["data"]) + delete_resp = dataset_client.delete_dataset(name=dataset_name) + assert "detail" in delete_resp assert "successfully deleted" in delete_resp["detail"].lower() - - search_after = client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}]) - datasets_after = search_after["data"] - - assert all( - d["name"] != dataset_name for d in datasets_after - ), f"Dataset {dataset_name} still found after deletion!" + search_after = dataset_client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}]) + assert all(d["name"] != dataset_name for d in search_after["data"]) @pytest.mark.skip(reason="Successfully creates dataset") def test_create_dataset_with_empty_name(self, dataset_tracker): @@ -184,24 +119,20 @@ def test_create_duplicate_dataset(self, dataset_tracker): resp = client.create_dataset(name=dataset_name) created.append(dataset_name) assert "successfully created" in resp["detail"].lower() - - with pytest.raises(HTTPError) as e: + with pytest.raises(HTTPError) as exc: client.create_dataset(name=dataset_name) - assert e.value.status_code == 400 - assert "already exists" in e.value.body.lower() + assert exc.value.status_code == 400 + assert "already exists" in exc.value.body.lower() class TestFiles: def test_upload_and_delete_file(self, file_tracker, tmp_path): created_files, client = file_tracker - data_dir = Path(__file__).parent.parent / "data" original_file = data_dir / "multivitamin.pdf" - unique_name = f"{uuid.uuid4().hex}_multivitamin.pdf" temp_file = tmp_path / unique_name shutil.copy(original_file, temp_file) - try: result = client.upload_file(str(temp_file)) assert 
isinstance(result, list) @@ -209,92 +140,112 @@ def test_upload_and_delete_file(self, file_tracker, tmp_path): assert file_info["status"] is True assert "id" in file_info assert "file_name" in file_info - created_files.append(file_info) - search = client.search_files() ids = [f["id"] for f in search["data"]] - assert file_info["id"] in ids, "Uploaded file not found in search" - + assert file_info["id"] in ids delete_result = client.delete_files([file_info["id"]]) assert delete_result[0]["status"] is True assert delete_result[0]["action"] == "delete" - search_after = client.search_files() ids_after = [f["id"] for f in search_after["data"]] - assert file_info["id"] not in ids_after, "File was not deleted properly" - + assert file_info["id"] not in ids_after created_files.clear() - finally: if temp_file.exists(): temp_file.unlink() - @pytest.mark.parametrize("content", ["", " "]) @pytest.mark.skip(reason="Uploads a file, but returns 500") - def test_upload_empty_file(self, file_tracker, tmp_path, content): - _, client = file_tracker - + @pytest.mark.parametrize("content", ["", " "]) + def test_upload_empty_file(self, file_client, tmp_path, content): empty_file = tmp_path / f"{uuid.uuid4().hex}_empty.pdf" empty_file.write_text(content) - - with pytest.raises(HTTPError) as e: - client.upload_file(str(empty_file)) - assert e.value.status_code == 500 - assert "Internal Server Error" in e.value.body + with pytest.raises(HTTPError) as exc: + file_client.upload_file(str(empty_file)) + assert exc.value.status_code == 500 + assert "Internal Server Error" in exc.value.body def test_move_file(self, file_tracker, dataset_tracker, tmp_path): created_datasets, dataset_client = dataset_tracker first_dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" second_dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" - first_resp = dataset_client.create_dataset(name=first_dataset_name) created_datasets.append(first_dataset_name) assert "successfully created" in first_resp["detail"].lower() 
datasets = dataset_client.search(filters=[{"field": "name", "operator": "eq", "value": first_dataset_name}])[ "data" ] - assert len(datasets) == 1, f"Expected 1 dataset, got {len(datasets)}" + assert len(datasets) == 1 first_dataset_id = datasets[0]["id"] - second_resp = dataset_client.create_dataset(name=second_dataset_name) created_datasets.append(second_dataset_name) datasets = dataset_client.search(filters=[{"field": "name", "operator": "eq", "value": second_dataset_name}])[ "data" ] - assert len(datasets) == 1, f"Expected 1 dataset, got {len(datasets)}" + assert len(datasets) == 1 second_dataset_id = datasets[0]["id"] assert "successfully created" in second_resp["detail"].lower() - created_files, file_client = file_tracker - data_dir = Path(__file__).parent.parent / "data" original_file = data_dir / "multivitamin.pdf" unique_name = f"{uuid.uuid4().hex}_multivitamin.pdf" temp_file = tmp_path / unique_name shutil.copy(original_file, temp_file) - try: result = file_client.upload_file(str(temp_file)) file_info = result[0] assert file_info["status"] is True created_files.append(file_info) file_id = file_info["id"] - move1 = file_client.move_files(name=first_dataset_name, objects=[file_id])[0] assert move1["status"] is True assert "successfully bounded" in move1["message"].lower() - files_in_first = dataset_client.search_files(dataset_id=first_dataset_id)["data"] - assert any(f["id"] == file_id for f in files_in_first), "File not found in first dataset after move" - + assert any(f["id"] == file_id for f in files_in_first) move2 = file_client.move_files(name=second_dataset_name, objects=[file_id])[0] assert move2["status"] is True assert "successfully bounded" in move2["message"].lower() - files_in_second = dataset_client.search_files(dataset_id=second_dataset_id)["data"] - assert any(f["id"] == file_id for f in files_in_second), "File not found in second dataset after move" - + assert any(f["id"] == file_id for f in files_in_second) finally: if temp_file.exists(): 
temp_file.unlink() + + +class TestJobs: + def test_create_and_poll_job(self, jobs_client, file_tracker, dataset_tracker, job_tracker, tmp_path): + created_files, file_client = file_tracker + created_datasets, dataset_client = dataset_tracker + data_dir = Path(__file__).parent.parent / "data" + original_file = data_dir / "multivitamin.pdf" + unique_name = f"{uuid.uuid4().hex}_multivitamin.pdf" + tmp_file = tmp_path / unique_name + shutil.copy(original_file, tmp_file) + upload_result = file_client.upload_file(str(tmp_file)) + file_info = upload_result[0] + assert file_info["status"] is True + created_files.append(file_info) + file_id = file_info["id"] + dataset_name = f"autotest_ds_{uuid.uuid4().hex[:8]}" + resp = dataset_client.create_dataset(name=dataset_name) + created_datasets.append(dataset_name) + assert "successfully created" in resp["detail"].lower() + move_resp = file_client.move_files(name=dataset_name, objects=[file_id])[0] + assert move_resp["status"] is True + job_name = f"test_job_{uuid.uuid4().hex[:8]}" + create_resp = jobs_client.create_job( + name=job_name, + pipeline_id="print", + file_ids=[file_id], + datasets=[], + owners=["0dc326e4-b190-4881-8d05-12359052abbf"], + pipeline_engine="airflow", + ) + job_tracker[0].append(create_resp) + job_id = create_resp.get("id") + assert job_id + final_job = jobs_client.poll_until_finished(job_id=job_id, timeout_seconds=300) + status = final_job.get("status") + assert str(status).lower() in {"finished", "success", "completed"} + job_files = final_job.get("files") or [] + assert file_id in job_files From a2e7bedadb7835bde868d65e4e39116c70dd36a3 Mon Sep 17 00:00:00 2001 From: asobolev Date: Tue, 26 Aug 2025 10:57:10 +0200 Subject: [PATCH 09/37] small fix for jobs creation test --- test_automation_framework/helpers/jobs/jobs_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_automation_framework/helpers/jobs/jobs_client.py b/test_automation_framework/helpers/jobs/jobs_client.py index 
f58623ccf..573a5ed63 100644 --- a/test_automation_framework/helpers/jobs/jobs_client.py +++ b/test_automation_framework/helpers/jobs/jobs_client.py @@ -65,7 +65,7 @@ def get_job(self, job_id: int) -> Dict[str, Any]: def get_progress(self, job_id: int) -> Dict[str, Any]: return self.post_json( - "/jobs/jobs/progress", json={"ids": [job_id]}, headers=self._default_headers(content_type_json=True) + "/jobs/jobs/progress", json=[job_id], headers=self._default_headers(content_type_json=True) ) def poll_until_finished( From 01751b2601bd6bff54e62ae14bbe7e7481754ffc Mon Sep 17 00:00:00 2001 From: asobolev Date: Tue, 26 Aug 2025 13:33:06 +0200 Subject: [PATCH 10/37] moved some constants from tests --- test_automation_framework/helpers/constants.py | 3 +++ test_automation_framework/helpers/jobs/jobs_client.py | 5 +++-- test_automation_framework/tests/test_base_api.py | 9 +++------ 3 files changed, 9 insertions(+), 8 deletions(-) create mode 100644 test_automation_framework/helpers/constants.py diff --git a/test_automation_framework/helpers/constants.py b/test_automation_framework/helpers/constants.py new file mode 100644 index 000000000..d1e688451 --- /dev/null +++ b/test_automation_framework/helpers/constants.py @@ -0,0 +1,3 @@ +OWNER_UUID = "0dc326e4-b190-4881-8d05-12359052abbf" +AIRFLOW_PIPELINE = "airflow" +PRINT_PIPELINE = "print" diff --git a/test_automation_framework/helpers/jobs/jobs_client.py b/test_automation_framework/helpers/jobs/jobs_client.py index 573a5ed63..632f3d7ce 100644 --- a/test_automation_framework/helpers/jobs/jobs_client.py +++ b/test_automation_framework/helpers/jobs/jobs_client.py @@ -2,6 +2,7 @@ from typing import Any, Dict, List import time import logging +from helpers.constants import AIRFLOW_PIPELINE, PRINT_PIPELINE from helpers.base_client.base_client import BaseClient @@ -22,9 +23,9 @@ def create_job( self, name: str, file_ids: list[int], - pipeline_id: str, - pipeline_engine: str, owners: list[str], + pipeline_id: str = PRINT_PIPELINE, + 
pipeline_engine: str = AIRFLOW_PIPELINE, datasets: list[int] | None = None, categories: list[str] | None = None, annotators: list[str] | None = None, diff --git a/test_automation_framework/tests/test_base_api.py b/test_automation_framework/tests/test_base_api.py index b4a938d3b..7fc1840ee 100644 --- a/test_automation_framework/tests/test_base_api.py +++ b/test_automation_framework/tests/test_base_api.py @@ -7,6 +7,7 @@ import pytest from helpers.base_client.base_client import HTTPError +from helpers.constants import OWNER_UUID logger = getLogger(__name__) @@ -162,8 +163,7 @@ def test_upload_empty_file(self, file_client, tmp_path, content): empty_file.write_text(content) with pytest.raises(HTTPError) as exc: file_client.upload_file(str(empty_file)) - assert exc.value.status_code == 500 - assert "Internal Server Error" in exc.value.body + assert exc.value.status_code == 400 def test_move_file(self, file_tracker, dataset_tracker, tmp_path): created_datasets, dataset_client = dataset_tracker @@ -235,11 +235,8 @@ def test_create_and_poll_job(self, jobs_client, file_tracker, dataset_tracker, j job_name = f"test_job_{uuid.uuid4().hex[:8]}" create_resp = jobs_client.create_job( name=job_name, - pipeline_id="print", file_ids=[file_id], - datasets=[], - owners=["0dc326e4-b190-4881-8d05-12359052abbf"], - pipeline_engine="airflow", + owners=[OWNER_UUID], ) job_tracker[0].append(create_resp) job_id = create_resp.get("id") From a857a40d27993b421ba7a5863d9cc899c974524d Mon Sep 17 00:00:00 2001 From: asobolev Date: Tue, 26 Aug 2025 16:00:07 +0200 Subject: [PATCH 11/37] added categories page test --- test_automation_framework/conftest.py | 6 + .../helpers/category/categories.py | 113 ++++++++++++++++++ .../tests/test_base_api.py | 19 +++ 3 files changed, 138 insertions(+) create mode 100644 test_automation_framework/helpers/category/categories.py diff --git a/test_automation_framework/conftest.py b/test_automation_framework/conftest.py index 53bf911d1..ae020233c 100644 --- 
a/test_automation_framework/conftest.py +++ b/test_automation_framework/conftest.py @@ -11,6 +11,7 @@ from helpers.files.file_client import FileClient from helpers.jobs.jobs_client import JobsClient from helpers.menu.menu_client import MenuClient +from helpers.category.categories import CategoriesClient logger = getLogger(__name__) @@ -71,6 +72,11 @@ def jobs_client(settings, access_token, tenant) -> JobsClient: return JobsClient(settings.BASE_URL, access_token, tenant) +@pytest.fixture +def categories_client(settings, access_token, tenant) -> CategoriesClient: + return CategoriesClient(settings.BASE_URL, access_token, tenant) + + @pytest.fixture def dataset_tracker(dataset_client): created: list[str] = [] diff --git a/test_automation_framework/helpers/category/categories.py b/test_automation_framework/helpers/category/categories.py new file mode 100644 index 000000000..12760608e --- /dev/null +++ b/test_automation_framework/helpers/category/categories.py @@ -0,0 +1,113 @@ +from __future__ import annotations +from typing import List, Optional +from pydantic import BaseModel +import logging +from helpers.base_client.base_client import BaseClient + +logger = logging.getLogger(__name__) + + +class CategoryParent(BaseModel): + name: str + id: str + type: str + metadata: dict + parent: Optional[str] = None + data_attributes: List[dict] = [] + is_leaf: Optional[bool] = None + + +class Category(BaseModel): + id: str + name: str + type: str + metadata: dict + parent: Optional[str] = None + data_attributes: List[dict] = [] + parents: List[CategoryParent] = [] + is_leaf: bool + + +class Pagination(BaseModel): + page_num: int + page_offset: int + page_size: int + min_pages_left: int + total: int + has_more: bool + + +class CategoriesResponse(BaseModel): + pagination: Pagination + data: List[Category] + + +class CategoryCreateResponse(BaseModel): + id: str + name: str + type: str + metadata: dict + parent: Optional[str] = None + data_attributes: list[dict] = [] + editor: 
Optional[str] = None + parents: Optional[list[dict]] = None + is_leaf: Optional[bool] = None + + +class CategoriesClient(BaseClient): + def __init__(self, base_url: str, token: str, tenant: str) -> None: + super().__init__(base_url, token=token, tenant=tenant) + + def search_categories( + self, + page_num: int = 1, + page_size: int = 15, + filters: list[dict] | None = None, + sorting: list[dict] | None = None, + ) -> CategoriesResponse: + payload = { + "pagination": {"page_num": page_num, "page_size": page_size}, + "filters": filters or [], + "sorting": sorting or [{"direction": "desc", "field": "name"}], + } + + resp = self.post_json( + "/annotation/categories/search", + json=payload, + headers=self._default_headers(content_type_json=True), + ) + return CategoriesResponse.model_validate(resp) + + def create_category( + self, + category_id: str, + name: str, + category_type: str = "box", + parent: str | None = None, + metadata: dict | None = None, + data_attributes: list[dict] | None = None, + ) -> CategoryCreateResponse: + payload = { + "id": category_id, + "name": name, + "type": category_type, + "parent": parent, + "metadata": metadata or {"color": "#67DE61"}, + "data_attributes": data_attributes or [], + } + resp = self.post_json( + "/annotation/categories", + json=payload, + headers=self._default_headers(content_type_json=True), + ) + return CategoryCreateResponse.model_validate(resp) + + def delete_category(self, category_id: str) -> dict: + payload = {"id": category_id} + resp = self.delete_json( + "/annotation/categories", + json=payload, + headers=self._default_headers(content_type_json=True), + ) + logger.info(f"Deleted category {category_id}") + return resp diff --git a/test_automation_framework/tests/test_base_api.py b/test_automation_framework/tests/test_base_api.py index 7fc1840ee..f9efdb1c7 100644 --- a/test_automation_framework/tests/test_base_api.py +++ b/test_automation_framework/tests/test_base_api.py @@ -246,3 +246,22 @@ def 
test_create_and_poll_job(self, jobs_client, file_tracker, dataset_tracker, j assert str(status).lower() in {"finished", "success", "completed"} job_files = final_job.get("files") or [] assert file_id in job_files + + +class TestCategories: + @pytest.mark.skip(reason="Creation works, but deletion not implemented, will be cluttered by multiple runs") + def test_create_and_delete_category(self, auth_token, settings, tenant, categories_client): + access_token, _ = auth_token + + unique_id = f"test_cat_{uuid.uuid4().hex[:6]}" + created = categories_client.create_category(category_id=unique_id, name=unique_id, parent="example") + assert created.id == unique_id + search_result = categories_client.search_categories(page_size=100) + ids = [c.id for c in search_result.data] + assert unique_id in ids, f"Category {unique_id} not found after creation" + + deleted = categories_client.delete_category(unique_id) + assert deleted.get("detail") or deleted.get("status") or "success" in str(deleted).lower() + search_after_delete = categories_client.search_categories(page_size=100) + ids_after = [c.id for c in search_after_delete.data] + assert unique_id not in ids_after, f"Category {unique_id} still present after deletion" From 2b8887158e63017d39bb7c6c31f4a921aa4210b1 Mon Sep 17 00:00:00 2001 From: asobolev Date: Tue, 26 Aug 2025 18:07:43 +0200 Subject: [PATCH 12/37] added users helper, removed uuid from constants --- test_automation_framework/conftest.py | 8 ++++ .../helpers/constants.py | 1 - .../helpers/users/users.py | 37 +++++++++++++++++++ .../tests/test_base_api.py | 5 +-- 4 files changed, 47 insertions(+), 4 deletions(-) create mode 100644 test_automation_framework/helpers/users/users.py diff --git a/test_automation_framework/conftest.py b/test_automation_framework/conftest.py index 53bf911d1..e4095653d 100644 --- a/test_automation_framework/conftest.py +++ b/test_automation_framework/conftest.py @@ -11,6 +11,7 @@ from helpers.files.file_client import FileClient from 
helpers.jobs.jobs_client import JobsClient from helpers.menu.menu_client import MenuClient +from helpers.users.users import UsersClient logger = getLogger(__name__) @@ -71,6 +72,13 @@ def jobs_client(settings, access_token, tenant) -> JobsClient: return JobsClient(settings.BASE_URL, access_token, tenant) +@pytest.fixture +def user_uuid(settings, access_token, tenant) -> str: + users_client = UsersClient(settings.BASE_URL, access_token, tenant) + users = users_client.search_users() + return next((u.id for u in users if u.username == "admin"), None) + + @pytest.fixture def dataset_tracker(dataset_client): created: list[str] = [] diff --git a/test_automation_framework/helpers/constants.py b/test_automation_framework/helpers/constants.py index d1e688451..fae538694 100644 --- a/test_automation_framework/helpers/constants.py +++ b/test_automation_framework/helpers/constants.py @@ -1,3 +1,2 @@ -OWNER_UUID = "0dc326e4-b190-4881-8d05-12359052abbf" AIRFLOW_PIPELINE = "airflow" PRINT_PIPELINE = "print" diff --git a/test_automation_framework/helpers/users/users.py b/test_automation_framework/helpers/users/users.py new file mode 100644 index 000000000..3222d371f --- /dev/null +++ b/test_automation_framework/helpers/users/users.py @@ -0,0 +1,37 @@ +from __future__ import annotations +from typing import Any, Dict, List, Optional +from pydantic import BaseModel + +from helpers.base_client.base_client import BaseClient + + +class UserAccess(BaseModel): + manageGroupMembership: bool + view: bool + mapRoles: bool + impersonate: bool + manage: bool + + +class UserResponse(BaseModel): + id: str + username: str + enabled: bool + email: Optional[str] = None + emailVerified: Optional[bool] = None + firstName: Optional[str] = None + lastName: Optional[str] = None + attributes: Optional[Dict[str, Any]] = None + access: Optional[UserAccess] = None + + +class UsersClient(BaseClient): + def __init__(self, base_url: str, token: str, tenant: str) -> None: + super().__init__(base_url, 
token=token, tenant=tenant) + + def search_users(self, filters: Optional[List[Dict[str, Any]]] = None) -> List[UserResponse]: + payload = {"filters": filters or []} + resp = self.post_json( + "/users/users/search", json=payload, headers=self._default_headers(content_type_json=True) + ) + return [UserResponse.model_validate(u) for u in resp] diff --git a/test_automation_framework/tests/test_base_api.py b/test_automation_framework/tests/test_base_api.py index 7fc1840ee..dca7df475 100644 --- a/test_automation_framework/tests/test_base_api.py +++ b/test_automation_framework/tests/test_base_api.py @@ -7,7 +7,6 @@ import pytest from helpers.base_client.base_client import HTTPError -from helpers.constants import OWNER_UUID logger = getLogger(__name__) @@ -213,7 +212,7 @@ def test_move_file(self, file_tracker, dataset_tracker, tmp_path): class TestJobs: - def test_create_and_poll_job(self, jobs_client, file_tracker, dataset_tracker, job_tracker, tmp_path): + def test_create_and_poll_job(self, jobs_client, file_tracker, dataset_tracker, job_tracker, tmp_path, user_uuid): created_files, file_client = file_tracker created_datasets, dataset_client = dataset_tracker data_dir = Path(__file__).parent.parent / "data" @@ -236,7 +235,7 @@ def test_create_and_poll_job(self, jobs_client, file_tracker, dataset_tracker, j create_resp = jobs_client.create_job( name=job_name, file_ids=[file_id], - owners=[OWNER_UUID], + owners=[user_uuid], ) job_tracker[0].append(create_resp) job_id = create_resp.get("id") From bd6de0ce28f0cc5e8f49b78acff4ae85c6f75100 Mon Sep 17 00:00:00 2001 From: asobolev Date: Wed, 27 Aug 2025 12:03:08 +0200 Subject: [PATCH 13/37] reports page test --- test_automation_framework/conftest.py | 6 ++++ .../helpers/reports/reports_client.py | 30 +++++++++++++++++++ .../tests/test_base_api.py | 11 +++++++ 3 files changed, 47 insertions(+) create mode 100644 test_automation_framework/helpers/reports/reports_client.py diff --git a/test_automation_framework/conftest.py 
b/test_automation_framework/conftest.py index e4095653d..b124cac38 100644 --- a/test_automation_framework/conftest.py +++ b/test_automation_framework/conftest.py @@ -12,6 +12,7 @@ from helpers.jobs.jobs_client import JobsClient from helpers.menu.menu_client import MenuClient from helpers.users.users import UsersClient +from helpers.reports.reports_client import ReportsClient logger = getLogger(__name__) @@ -72,6 +73,11 @@ def jobs_client(settings, access_token, tenant) -> JobsClient: return JobsClient(settings.BASE_URL, access_token, tenant) +@pytest.fixture +def reports_client(settings, access_token, tenant) -> ReportsClient: + return ReportsClient(settings.BASE_URL, access_token, tenant) + + @pytest.fixture def user_uuid(settings, access_token, tenant) -> str: users_client = UsersClient(settings.BASE_URL, access_token, tenant) diff --git a/test_automation_framework/helpers/reports/reports_client.py b/test_automation_framework/helpers/reports/reports_client.py new file mode 100644 index 000000000..69ea929ce --- /dev/null +++ b/test_automation_framework/helpers/reports/reports_client.py @@ -0,0 +1,30 @@ +from typing import List +import logging +from helpers.base_client.base_client import BaseClient + +logger = logging.getLogger(__name__) + + +class ReportsClient(BaseClient): + def __init__(self, base_url: str, token: str, tenant: str) -> None: + super().__init__(base_url, token=token, tenant=tenant) + + def export_tasks( + self, + user_ids: List[str], + date_from: str, + date_to: str, + ) -> str: + payload = { + "user_ids": user_ids, + "date_from": date_from, + "date_to": date_to, + } + resp = self.post( + "/annotation/tasks/export", + json=payload, + headers=self._default_headers(content_type_json=True), + ) + resp.raise_for_status() + logger.info(f"Exported tasks for users={user_ids} from {date_from} to {date_to}") + return resp.text diff --git a/test_automation_framework/tests/test_base_api.py b/test_automation_framework/tests/test_base_api.py index 
dca7df475..a777dd3c0 100644 --- a/test_automation_framework/tests/test_base_api.py +++ b/test_automation_framework/tests/test_base_api.py @@ -245,3 +245,14 @@ def test_create_and_poll_job(self, jobs_client, file_tracker, dataset_tracker, j assert str(status).lower() in {"finished", "success", "completed"} job_files = final_job.get("files") or [] assert file_id in job_files + + +class TestReports: + def test_export_tasks_csv(self, reports_client, user_uuid): + csv_text = reports_client.export_tasks( + user_ids=[user_uuid], + date_from="2025-05-01 00:00:00", + date_to="2025-08-31 00:00:00", + ) + assert "annotator_id" in csv_text + assert "task_id" in csv_text From 3ad3e1a520c5fcfabe15779c16de9e97953b1b38 Mon Sep 17 00:00:00 2001 From: asobolev Date: Thu, 28 Aug 2025 13:17:51 +0200 Subject: [PATCH 14/37] added plugin tests --- test_automation_framework/conftest.py | 18 +++++ .../helpers/base_client/base_client.py | 25 +++++- .../helpers/plugins/plugins_client.py | 78 +++++++++++++++++++ .../tests/test_base_api.py | 71 +++++++++++++++++ 4 files changed, 189 insertions(+), 3 deletions(-) create mode 100644 test_automation_framework/helpers/plugins/plugins_client.py diff --git a/test_automation_framework/conftest.py b/test_automation_framework/conftest.py index b124cac38..953f21bf6 100644 --- a/test_automation_framework/conftest.py +++ b/test_automation_framework/conftest.py @@ -13,6 +13,7 @@ from helpers.menu.menu_client import MenuClient from helpers.users.users import UsersClient from helpers.reports.reports_client import ReportsClient +from helpers.plugins.plugins_client import PluginsClient logger = getLogger(__name__) @@ -78,6 +79,11 @@ def reports_client(settings, access_token, tenant) -> ReportsClient: return ReportsClient(settings.BASE_URL, access_token, tenant) +@pytest.fixture +def plugins_client(settings, access_token, tenant) -> PluginsClient: + return PluginsClient(settings.BASE_URL, access_token, tenant) + + @pytest.fixture def user_uuid(settings, 
access_token, tenant) -> str: users_client = UsersClient(settings.BASE_URL, access_token, tenant) @@ -124,3 +130,15 @@ def job_tracker(jobs_client): logger.info(f"[job_tracker] Cancelled job {job_id}") except Exception as e: logger.warning(f"[job_tracker] Could not cancel job {job_id}: {e}") + + +@pytest.fixture +def plugins_tracker(plugins_client): + created: list[int] = [] + yield created, plugins_client + for id in created: + try: + plugins_client.delete_plugin(plugin_id=id) + logger.info(f"[plugins_tracker] Deleted plugin {id}") + except Exception as e: + logger.warning(f"[plugins_tracker] Failed to delete plugin {id}: {e}") diff --git a/test_automation_framework/helpers/base_client/base_client.py b/test_automation_framework/helpers/base_client/base_client.py index 69a476f35..b21a77548 100644 --- a/test_automation_framework/helpers/base_client/base_client.py +++ b/test_automation_framework/helpers/base_client/base_client.py @@ -18,6 +18,12 @@ def __init__( self.status_code = status_code self.body = body + def __str__(self): + base = super().__str__() + if self.body: + return f"{base}\nResponse body: {self.body}" + return base + class BaseClient: def __init__( @@ -51,6 +57,13 @@ def _request(self, method: str, path: str, headers: dict | None = None, **kwargs rel_path = path if path.startswith("/") else "/" + path start = time.perf_counter() merged_headers = {**self._default_headers(), **(headers or {})} + + # Log the request details for debugging + logger.debug(f"Making {method} request to {self.base_url}{rel_path}") + logger.debug(f"Headers: {merged_headers}") + if "json" in kwargs: + logger.debug(f"JSON payload: {kwargs['json']}") + try: resp = self._client.request(method, rel_path, headers=merged_headers, **kwargs) resp.raise_for_status() @@ -60,13 +73,19 @@ def _request(self, method: str, path: str, headers: dict | None = None, **kwargs return resp except httpx.HTTPStatusError as exc: resp = exc.response + error_body = resp.text logger.error( - f"Bad 
response: {resp.status_code} for {method} {self.base_url}{rel_path} - body: {resp.text[:500]}" + f"Bad response: {resp.status_code} for {method} {self.base_url}{rel_path} - body: {error_body[:500]}" ) + # Create a more informative error message + error_message = f"{method} {self.base_url}{rel_path} -> {resp.status_code}" + if error_body: + error_message += f"\nServer response: {error_body}" + raise HTTPError( - f"{method} {self.base_url}{rel_path} -> {resp.status_code}", + error_message, status_code=resp.status_code, - body=resp.text, + body=error_body, ) from exc except httpx.RequestError as exc: logger.exception(f"Request failed: {method} {self.base_url}{rel_path}") diff --git a/test_automation_framework/helpers/plugins/plugins_client.py b/test_automation_framework/helpers/plugins/plugins_client.py new file mode 100644 index 000000000..1c31d5c09 --- /dev/null +++ b/test_automation_framework/helpers/plugins/plugins_client.py @@ -0,0 +1,78 @@ +from typing import Any, Dict, List +import logging +from helpers.base_client.base_client import BaseClient + + +logger = logging.getLogger(__name__) + + +class PluginsClient(BaseClient): + def __init__(self, base_url: str, token: str, tenant: str) -> None: + super().__init__(base_url, token=token, tenant=tenant) + + def get_plugins(self) -> List[Dict[str, Any]]: + return self.get_json("/core/plugins", headers=self._default_headers()) + + def create_plugin( + self, + name: str, + menu_name: str, + url: str, + version: str = "1", + description: str = "", + is_iframe: bool = True, + ) -> dict: + payload = { + "name": name, + "menu_name": menu_name, + "description": description, + "version": version, + "url": url, + "is_iframe": is_iframe, + } + + # Enhanced headers to match the successful request exactly + headers = self._default_headers(content_type_json=True) + headers.update( + { + "Accept": "*/*", + "Accept-Encoding": "gzip, deflate", + "Accept-Language": "en-US,en;q=0.5", + "Connection": "keep-alive", + "DNT": "1", + 
"Origin": "http://demo.badgerdoc.com:8083", + "Priority": "u=0", + "Referer": "http://demo.badgerdoc.com:8083/", + "Sec-GPC": "1", + "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:138.0) Gecko/20100101 Firefox/138.0", + } + ) + + # Log the request for debugging + logger.info(f"Creating plugin with payload: {payload}") + logger.info(f"Using headers: {headers}") + + try: + return self.post_json( + "/core/plugins", + json=payload, + headers=headers, + ) + except Exception as e: + logger.error(f"Failed to create plugin: {e}") + if hasattr(e, "body"): + logger.error(f"Response body: {e.body}") + raise + + def update_plugin(self, plugin_id: int, **fields) -> dict: + return self.put_json( + f"/core/plugins/{plugin_id}", + json=fields, + headers=self._default_headers(content_type_json=True), + ) + + def delete_plugin(self, plugin_id: int) -> dict: + return self.delete_json( + f"/core/plugins/{plugin_id}", + headers=self._default_headers(content_type_json=True), + ) diff --git a/test_automation_framework/tests/test_base_api.py b/test_automation_framework/tests/test_base_api.py index a777dd3c0..ecccdfc10 100644 --- a/test_automation_framework/tests/test_base_api.py +++ b/test_automation_framework/tests/test_base_api.py @@ -256,3 +256,74 @@ def test_export_tasks_csv(self, reports_client, user_uuid): ) assert "annotator_id" in csv_text assert "task_id" in csv_text + + @pytest.mark.parametrize( + "date_from,date_to", + [ + ("2028-05-01 00:00:00", "2028-08-31 00:00:00"), + ("1900-01-01 00:00:00", "1900-12-31 00:00:00"), + ("2025-09-01 00:00:00", "2025-08-01 00:00:00"), + ], + ) + def test_export_tasks_wrong_date(self, reports_client, user_uuid, date_from, date_to): + with pytest.raises(HTTPError) as exc: + reports_client.export_tasks( + user_ids=[user_uuid], + date_from=date_from, + date_to=date_to, + ) + assert exc.value.status_code == 406 + + +class TestPlugins: + def test_create_and_delete_plugin(self, plugins_tracker): + created, plugins_client = 
plugins_tracker + unique_name = f"plugin_{uuid.uuid4().hex[:8]}" + resp = plugins_client.create_plugin( + name=unique_name, + menu_name=unique_name, + description="bar", + version="1", + url="http://what.com/what", + is_iframe=True, + ) + plugin_id = resp["id"] + created.append(plugin_id) + + plugins = plugins_client.get_plugins() + assert any(p["id"] == plugin_id for p in plugins) + assert any(p["name"] == unique_name for p in plugins) + + plugins_client.delete_plugin(plugin_id) + + plugins = plugins_client.get_plugins() + assert not any(p["id"] == plugin_id for p in plugins) + + def test_update_plugin(self, plugins_tracker): + created, plugins_client = plugins_tracker + unique_name = f"plugin_{uuid.uuid4().hex[:8]}" + resp = plugins_client.create_plugin( + name=unique_name, + menu_name=unique_name, + description="bar", + version="1", + url="http://what.com/what", + is_iframe=True, + ) + plugin_id = resp["id"] + created.append(plugin_id) + + updated_payload = { + "name": unique_name, + "menu_name": unique_name, + "description": "updated desc", + "version": "1", + "url": "http://what.com/what", + "is_iframe": True, + } + update_resp = plugins_client.update_plugin(plugin_id, **updated_payload) + assert update_resp["description"] == "updated desc" + + plugins = plugins_client.get_plugins() + updated = next(p for p in plugins if p["id"] == plugin_id) + assert updated["description"] == "updated desc" From 93916447238ec8feb5216ba4fb900acc502f7522 Mon Sep 17 00:00:00 2001 From: asobolev Date: Thu, 28 Aug 2025 17:30:44 +0200 Subject: [PATCH 15/37] added more tests for datasets and files --- .../helpers/files/file_client.py | 15 +++ .../tests/test_base_api.py | 112 +++++++++++++----- 2 files changed, 95 insertions(+), 32 deletions(-) diff --git a/test_automation_framework/helpers/files/file_client.py b/test_automation_framework/helpers/files/file_client.py index 5a79e9e58..bee3869d8 100644 --- a/test_automation_framework/helpers/files/file_client.py +++ 
b/test_automation_framework/helpers/files/file_client.py @@ -2,6 +2,9 @@ from helpers.base_client.base_client import BaseClient import logging from typing import List +import shutil +import uuid +from pathlib import Path logger = logging.getLogger(__name__) @@ -48,3 +51,15 @@ def move_files(self, name: str, objects: list) -> dict: ) logger.info(f"Moved object {objects} to the dataset {name}") return resp + + @staticmethod + def upload_temp_file(client, file_tracker, tmp_path, suffix="pdf"): + data_dir = Path(__file__).parent.parent.parent / "data" + original_file = data_dir / "multivitamin.pdf" + unique_name = f"{uuid.uuid4().hex}.{suffix}" + temp_file = tmp_path / unique_name + shutil.copy(original_file, temp_file) + result = client.upload_file(str(temp_file)) + file_info = result[0] + file_tracker[0].append(file_info) + return file_info, temp_file diff --git a/test_automation_framework/tests/test_base_api.py b/test_automation_framework/tests/test_base_api.py index ecccdfc10..88cc7175c 100644 --- a/test_automation_framework/tests/test_base_api.py +++ b/test_automation_framework/tests/test_base_api.py @@ -51,7 +51,7 @@ def test_menu(self, menu_client): class TestDatasets: - def test_search_basic(self, dataset_client): + def test_clear_search_for_datasets(self, dataset_client): result = dataset_client.search() assert "pagination" in result assert "data" in result @@ -124,19 +124,41 @@ def test_create_duplicate_dataset(self, dataset_tracker): assert exc.value.status_code == 400 assert "already exists" in exc.value.body.lower() + def test_search_existing_dataset(self, dataset_tracker): + created, client = dataset_tracker + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + resp = client.create_dataset(name=dataset_name) + created.append(dataset_name) + assert "successfully created" in resp["detail"].lower() + + search_resp = client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}]) + names = [d["name"] for d in search_resp["data"]] + assert 
dataset_name in names + + def test_search_non_existing_dataset(self, dataset_client): + search_resp = dataset_client.search( + filters=[{"field": "name", "operator": "eq", "value": "non_existing_dataset"}] + ) + assert search_resp["data"] == [] + + def test_search_multiple_existing_datasets(self, dataset_tracker): + created, client = dataset_tracker + names = [f"autotest_{uuid.uuid4().hex[:8]}" for _ in range(2)] + for n in names: + resp = client.create_dataset(name=n) + created.append(n) + assert "successfully created" in resp["detail"].lower() + + search_resp = client.search(filters=[{"field": "name", "operator": "in", "value": names}]) + found_names = {d["name"] for d in search_resp["data"]} + assert set(names) <= found_names + class TestFiles: def test_upload_and_delete_file(self, file_tracker, tmp_path): created_files, client = file_tracker - data_dir = Path(__file__).parent.parent / "data" - original_file = data_dir / "multivitamin.pdf" - unique_name = f"{uuid.uuid4().hex}_multivitamin.pdf" - temp_file = tmp_path / unique_name - shutil.copy(original_file, temp_file) try: - result = client.upload_file(str(temp_file)) - assert isinstance(result, list) - file_info = result[0] + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) assert file_info["status"] is True assert "id" in file_info assert "file_name" in file_info @@ -166,42 +188,35 @@ def test_upload_empty_file(self, file_client, tmp_path, content): def test_move_file(self, file_tracker, dataset_tracker, tmp_path): created_datasets, dataset_client = dataset_tracker + first_dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" second_dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + first_resp = dataset_client.create_dataset(name=first_dataset_name) created_datasets.append(first_dataset_name) assert "successfully created" in first_resp["detail"].lower() - datasets = dataset_client.search(filters=[{"field": "name", "operator": "eq", "value": first_dataset_name}])[ - "data" - ] - 
assert len(datasets) == 1 - first_dataset_id = datasets[0]["id"] + first_dataset_id = dataset_client.search( + filters=[{"field": "name", "operator": "eq", "value": first_dataset_name}] + )["data"][0]["id"] + second_resp = dataset_client.create_dataset(name=second_dataset_name) created_datasets.append(second_dataset_name) - datasets = dataset_client.search(filters=[{"field": "name", "operator": "eq", "value": second_dataset_name}])[ - "data" - ] - assert len(datasets) == 1 - second_dataset_id = datasets[0]["id"] assert "successfully created" in second_resp["detail"].lower() - created_files, file_client = file_tracker - data_dir = Path(__file__).parent.parent / "data" - original_file = data_dir / "multivitamin.pdf" - unique_name = f"{uuid.uuid4().hex}_multivitamin.pdf" - temp_file = tmp_path / unique_name - shutil.copy(original_file, temp_file) + second_dataset_id = dataset_client.search( + filters=[{"field": "name", "operator": "eq", "value": second_dataset_name}] + )["data"][0]["id"] + + created_files, client = file_tracker + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + created_files.append(file_info) + file_id = file_info["id"] try: - result = file_client.upload_file(str(temp_file)) - file_info = result[0] - assert file_info["status"] is True - created_files.append(file_info) - file_id = file_info["id"] - move1 = file_client.move_files(name=first_dataset_name, objects=[file_id])[0] + move1 = client.move_files(name=first_dataset_name, objects=[file_id])[0] assert move1["status"] is True assert "successfully bounded" in move1["message"].lower() files_in_first = dataset_client.search_files(dataset_id=first_dataset_id)["data"] assert any(f["id"] == file_id for f in files_in_first) - move2 = file_client.move_files(name=second_dataset_name, objects=[file_id])[0] + move2 = client.move_files(name=second_dataset_name, objects=[file_id])[0] assert move2["status"] is True assert "successfully bounded" in move2["message"].lower() 
files_in_second = dataset_client.search_files(dataset_id=second_dataset_id)["data"] @@ -210,6 +225,39 @@ def test_move_file(self, file_tracker, dataset_tracker, tmp_path): if temp_file.exists(): temp_file.unlink() + def test_search_existing_file(self, file_tracker, tmp_path): + created_files, client = file_tracker + try: + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + assert file_info["status"] is True + search_resp = client.search_files( + filters=[{"field": "original_name", "operator": "eq", "value": file_info["file_name"]}] + ) + names = [f["original_name"] for f in search_resp["data"]] + assert file_info["file_name"] in names + finally: + if temp_file.exists(): + temp_file.unlink() + + def test_search_non_existing_file(self, file_client): + search_resp = file_client.search_files( + filters=[{"field": "original_name", "operator": "eq", "value": "definitely_not_a_file.pdf"}] + ) + assert search_resp["data"] == [] + + def test_search_multiple_existing_files(self, file_tracker, tmp_path): + created_files, client = file_tracker + f1, t1 = client.upload_temp_file(client, file_tracker, tmp_path) + f2, t2 = client.upload_temp_file(client, file_tracker, tmp_path) + names = [f1["file_name"], f2["file_name"]] + + search = client.search_files(filters=[{"field": "original_name", "operator": "in", "value": names}]) + found_names = {f["original_name"] for f in search["data"]} + assert set(names) <= found_names + + t1.unlink(missing_ok=True) + t2.unlink(missing_ok=True) + class TestJobs: def test_create_and_poll_job(self, jobs_client, file_tracker, dataset_tracker, job_tracker, tmp_path, user_uuid): From 2b1da0378750be3eb922ab1bb1f3a9e425aaf79e Mon Sep 17 00:00:00 2001 From: asobolev Date: Fri, 29 Aug 2025 16:49:10 +0200 Subject: [PATCH 16/37] added more jobs tests --- .../tests/test_base_api.py | 115 +++++++++++++++--- 1 file changed, 95 insertions(+), 20 deletions(-) diff --git a/test_automation_framework/tests/test_base_api.py 
b/test_automation_framework/tests/test_base_api.py index fdff9a855..be06073c0 100644 --- a/test_automation_framework/tests/test_base_api.py +++ b/test_automation_framework/tests/test_base_api.py @@ -1,11 +1,10 @@ from logging import getLogger -from datetime import datetime -from pathlib import Path +from datetime import datetime, timedelta import uuid -import shutil import pytest + from helpers.base_client.base_client import HTTPError logger = getLogger(__name__) @@ -260,29 +259,22 @@ def test_search_multiple_existing_files(self, file_tracker, tmp_path): class TestJobs: - def test_create_and_poll_job(self, jobs_client, file_tracker, dataset_tracker, job_tracker, tmp_path, user_uuid): - created_files, file_client = file_tracker + def test_create_and_poll_job( + self, file_client, jobs_client, file_tracker, dataset_tracker, job_tracker, tmp_path, user_uuid + ): + created_files, client = file_tracker + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) created_datasets, dataset_client = dataset_tracker - data_dir = Path(__file__).parent.parent / "data" - original_file = data_dir / "multivitamin.pdf" - unique_name = f"{uuid.uuid4().hex}_multivitamin.pdf" - tmp_file = tmp_path / unique_name - shutil.copy(original_file, tmp_file) - upload_result = file_client.upload_file(str(tmp_file)) - file_info = upload_result[0] - assert file_info["status"] is True - created_files.append(file_info) - file_id = file_info["id"] + dataset_name = f"autotest_ds_{uuid.uuid4().hex[:8]}" - resp = dataset_client.create_dataset(name=dataset_name) + dataset_client.create_dataset(name=dataset_name) created_datasets.append(dataset_name) - assert "successfully created" in resp["detail"].lower() - move_resp = file_client.move_files(name=dataset_name, objects=[file_id])[0] + move_resp = file_client.move_files(name=dataset_name, objects=[file_info["id"]])[0] assert move_resp["status"] is True job_name = f"test_job_{uuid.uuid4().hex[:8]}" create_resp = 
jobs_client.create_job( name=job_name, - file_ids=[file_id], + file_ids=[file_info["id"]], owners=[user_uuid], ) job_tracker[0].append(create_resp) @@ -292,7 +284,90 @@ def test_create_and_poll_job(self, jobs_client, file_tracker, dataset_tracker, j status = final_job.get("status") assert str(status).lower() in {"finished", "success", "completed"} job_files = final_job.get("files") or [] - assert file_id in job_files + assert file_info["id"] in job_files + + @pytest.mark.parametrize("field", ["name", "type", "status", "deadline", "creation_datetime"]) + @pytest.mark.parametrize("direction", ["asc", "desc"]) + # descending name sorting works weird + def test_sorting(self, jobs_client, field, direction): + resp = jobs_client.post_json( + "/jobs/jobs/search", + json={ + "pagination": {"page_num": 1, "page_size": 15}, + "filters": [], + "sorting": [{"direction": direction, "field": field}], + }, + headers=jobs_client._default_headers(content_type_json=True), + ) + data = resp["data"] + values = [d[field] for d in data if field in d and d[field] is not None] + + if field in {"creation_datetime", "deadline"}: + values = [datetime.fromisoformat(v) for v in values] + + expected = sorted(values, reverse=(direction == "desc")) + assert values == expected + + @pytest.mark.parametrize("field", ["name", "type", "status", "deadline", "creation_datetime"]) + def test_job_search(self, jobs_client, job_tracker, file_tracker, dataset_tracker, user_uuid, tmp_path, field): + created_files, client = file_tracker + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + created_datasets, dataset_client = dataset_tracker + + dataset_name = f"autotest_ds_{uuid.uuid4().hex[:8]}" + dataset_client.create_dataset(name=dataset_name) + created_datasets.append(dataset_name) + + job_name = f"test_job_{uuid.uuid4().hex[:8]}" + create_resp = jobs_client.create_job( + name=job_name, + file_ids=[file_info["id"]], + owners=[user_uuid], + ) + job_id = create_resp.get("id") + 
jobs_client.poll_until_finished(job_id=job_id, timeout_seconds=300) + job_tracker[0].append(create_resp) + search_value = create_resp.get(field, None) + + filters = [ + {"field": field, "operator": "eq", "value": search_value}, + {"field": "name", "operator": "eq", "value": job_name}, + ] + + search_resp = jobs_client.post_json( + "/jobs/jobs/search", + json={ + "pagination": {"page_num": 1, "page_size": 100}, + "filters": filters, + }, + headers=jobs_client._default_headers(content_type_json=True), + ) + + job_ids = [j["id"] for j in search_resp["data"]] + assert job_id in job_ids + + @pytest.mark.parametrize("field", ["creation_datetime", "deadline"]) + def test_date_range_filter(self, jobs_client, field): + start = (datetime.utcnow() - timedelta(days=365)).replace(microsecond=0).isoformat() + end = (datetime.utcnow() + timedelta(days=365)).replace(microsecond=0).isoformat() + + resp = jobs_client.post_json( + "/jobs/jobs/search", + json={ + "pagination": {"page_num": 1, "page_size": 15}, + "filters": [ + {"field": field, "operator": "ge", "value": start}, + {"field": field, "operator": "le", "value": end}, + ], + }, + headers=jobs_client._default_headers(content_type_json=True), + ) + + data = resp["data"] + for job in data: + if field in job and job[field] is not None: + date_val = datetime.fromisoformat(job[field]) + assert datetime.fromisoformat(start) <= date_val <= datetime.fromisoformat(end) class TestCategories: From 1723cb54bcd81e04b334182d06a0c3b2f1654eb4 Mon Sep 17 00:00:00 2001 From: asobolev Date: Fri, 29 Aug 2025 16:49:10 +0200 Subject: [PATCH 17/37] added more jobs tests --- .../tests/test_base_api.py | 115 +++++++++++++++--- 1 file changed, 95 insertions(+), 20 deletions(-) diff --git a/test_automation_framework/tests/test_base_api.py b/test_automation_framework/tests/test_base_api.py index 88cc7175c..b1b4de485 100644 --- a/test_automation_framework/tests/test_base_api.py +++ b/test_automation_framework/tests/test_base_api.py @@ -1,11 +1,10 @@ 
from logging import getLogger -from datetime import datetime -from pathlib import Path +from datetime import datetime, timedelta import uuid -import shutil import pytest + from helpers.base_client.base_client import HTTPError logger = getLogger(__name__) @@ -260,29 +259,22 @@ def test_search_multiple_existing_files(self, file_tracker, tmp_path): class TestJobs: - def test_create_and_poll_job(self, jobs_client, file_tracker, dataset_tracker, job_tracker, tmp_path, user_uuid): - created_files, file_client = file_tracker + def test_create_and_poll_job( + self, file_client, jobs_client, file_tracker, dataset_tracker, job_tracker, tmp_path, user_uuid + ): + created_files, client = file_tracker + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) created_datasets, dataset_client = dataset_tracker - data_dir = Path(__file__).parent.parent / "data" - original_file = data_dir / "multivitamin.pdf" - unique_name = f"{uuid.uuid4().hex}_multivitamin.pdf" - tmp_file = tmp_path / unique_name - shutil.copy(original_file, tmp_file) - upload_result = file_client.upload_file(str(tmp_file)) - file_info = upload_result[0] - assert file_info["status"] is True - created_files.append(file_info) - file_id = file_info["id"] + dataset_name = f"autotest_ds_{uuid.uuid4().hex[:8]}" - resp = dataset_client.create_dataset(name=dataset_name) + dataset_client.create_dataset(name=dataset_name) created_datasets.append(dataset_name) - assert "successfully created" in resp["detail"].lower() - move_resp = file_client.move_files(name=dataset_name, objects=[file_id])[0] + move_resp = file_client.move_files(name=dataset_name, objects=[file_info["id"]])[0] assert move_resp["status"] is True job_name = f"test_job_{uuid.uuid4().hex[:8]}" create_resp = jobs_client.create_job( name=job_name, - file_ids=[file_id], + file_ids=[file_info["id"]], owners=[user_uuid], ) job_tracker[0].append(create_resp) @@ -292,7 +284,90 @@ def test_create_and_poll_job(self, jobs_client, file_tracker, 
dataset_tracker, j status = final_job.get("status") assert str(status).lower() in {"finished", "success", "completed"} job_files = final_job.get("files") or [] - assert file_id in job_files + assert file_info["id"] in job_files + + @pytest.mark.parametrize("field", ["name", "type", "status", "deadline", "creation_datetime"]) + @pytest.mark.parametrize("direction", ["asc", "desc"]) + # descending name sorting works weird + def test_sorting(self, jobs_client, field, direction): + resp = jobs_client.post_json( + "/jobs/jobs/search", + json={ + "pagination": {"page_num": 1, "page_size": 15}, + "filters": [], + "sorting": [{"direction": direction, "field": field}], + }, + headers=jobs_client._default_headers(content_type_json=True), + ) + data = resp["data"] + values = [d[field] for d in data if field in d and d[field] is not None] + + if field in {"creation_datetime", "deadline"}: + values = [datetime.fromisoformat(v) for v in values] + + expected = sorted(values, reverse=(direction == "desc")) + assert values == expected + + @pytest.mark.parametrize("field", ["name", "type", "status", "deadline", "creation_datetime"]) + def test_job_search(self, jobs_client, job_tracker, file_tracker, dataset_tracker, user_uuid, tmp_path, field): + created_files, client = file_tracker + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + created_datasets, dataset_client = dataset_tracker + + dataset_name = f"autotest_ds_{uuid.uuid4().hex[:8]}" + dataset_client.create_dataset(name=dataset_name) + created_datasets.append(dataset_name) + + job_name = f"test_job_{uuid.uuid4().hex[:8]}" + create_resp = jobs_client.create_job( + name=job_name, + file_ids=[file_info["id"]], + owners=[user_uuid], + ) + job_id = create_resp.get("id") + jobs_client.poll_until_finished(job_id=job_id, timeout_seconds=300) + job_tracker[0].append(create_resp) + search_value = create_resp.get(field, None) + + filters = [ + {"field": field, "operator": "eq", "value": search_value}, + 
{"field": "name", "operator": "eq", "value": job_name}, + ] + + search_resp = jobs_client.post_json( + "/jobs/jobs/search", + json={ + "pagination": {"page_num": 1, "page_size": 100}, + "filters": filters, + }, + headers=jobs_client._default_headers(content_type_json=True), + ) + + job_ids = [j["id"] for j in search_resp["data"]] + assert job_id in job_ids + + @pytest.mark.parametrize("field", ["creation_datetime", "deadline"]) + def test_date_range_filter(self, jobs_client, field): + start = (datetime.utcnow() - timedelta(days=365)).replace(microsecond=0).isoformat() + end = (datetime.utcnow() + timedelta(days=365)).replace(microsecond=0).isoformat() + + resp = jobs_client.post_json( + "/jobs/jobs/search", + json={ + "pagination": {"page_num": 1, "page_size": 15}, + "filters": [ + {"field": field, "operator": "ge", "value": start}, + {"field": field, "operator": "le", "value": end}, + ], + }, + headers=jobs_client._default_headers(content_type_json=True), + ) + + data = resp["data"] + for job in data: + if field in job and job[field] is not None: + date_val = datetime.fromisoformat(job[field]) + assert datetime.fromisoformat(start) <= date_val <= datetime.fromisoformat(end) class TestReports: From fed15c0f0b2b5fe40c8576ce3d6eff3c3bda7ba5 Mon Sep 17 00:00:00 2001 From: asobolev Date: Tue, 2 Sep 2025 17:54:15 +0200 Subject: [PATCH 18/37] added file download tests --- .../helpers/files/file_client.py | 28 +++++++++++++++++++ .../helpers/plugins/plugins_client.py | 5 ---- .../tests/test_base_api.py | 17 +++++++++++ 3 files changed, 45 insertions(+), 5 deletions(-) diff --git a/test_automation_framework/helpers/files/file_client.py b/test_automation_framework/helpers/files/file_client.py index bee3869d8..ca7a33b95 100644 --- a/test_automation_framework/helpers/files/file_client.py +++ b/test_automation_framework/helpers/files/file_client.py @@ -5,6 +5,8 @@ import shutil import uuid from pathlib import Path +import httpx +from helpers.base_client.base_client import 
HTTPError logger = logging.getLogger(__name__) @@ -61,5 +63,31 @@ def upload_temp_file(client, file_tracker, tmp_path, suffix="pdf"): shutil.copy(original_file, temp_file) result = client.upload_file(str(temp_file)) file_info = result[0] + assert file_info["status"] is True file_tracker[0].append(file_info) return file_info, temp_file + + def download_file(self, file_id: int) -> bytes: + resp = self._client.get( + f"{self.base_url}/assets/download?file_id={file_id}", + headers=self._default_headers(), + follow_redirects=False, + ) + + if resp.status_code >= 400: + raise HTTPError( + f"GET {resp.request.url} -> {resp.status_code}", + status_code=resp.status_code, + body=resp.text, + ) + + if resp.status_code == 302 and "location" in resp.headers: + s3_resp = httpx.get(resp.headers["location"]) + s3_resp.raise_for_status() + return s3_resp.content + + raise HTTPError( + f"Unexpected response {resp.status_code} for file_id={file_id}", + status_code=resp.status_code, + body=resp.text, + ) diff --git a/test_automation_framework/helpers/plugins/plugins_client.py b/test_automation_framework/helpers/plugins/plugins_client.py index 1c31d5c09..9f4d31f04 100644 --- a/test_automation_framework/helpers/plugins/plugins_client.py +++ b/test_automation_framework/helpers/plugins/plugins_client.py @@ -31,7 +31,6 @@ def create_plugin( "is_iframe": is_iframe, } - # Enhanced headers to match the successful request exactly headers = self._default_headers(content_type_json=True) headers.update( { @@ -48,10 +47,6 @@ def create_plugin( } ) - # Log the request for debugging - logger.info(f"Creating plugin with payload: {payload}") - logger.info(f"Using headers: {headers}") - try: return self.post_json( "/core/plugins", diff --git a/test_automation_framework/tests/test_base_api.py b/test_automation_framework/tests/test_base_api.py index b1b4de485..e10001696 100644 --- a/test_automation_framework/tests/test_base_api.py +++ b/test_automation_framework/tests/test_base_api.py @@ -257,6 +257,23 
@@ def test_search_multiple_existing_files(self, file_tracker, tmp_path): t1.unlink(missing_ok=True) t2.unlink(missing_ok=True) + def test_download_existing_file(self, file_tracker, tmp_path): + created_files, client = file_tracker + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + file_id = file_info["id"] + + content = client.download_file(file_id) + assert isinstance(content, (bytes, bytearray)) + assert len(content) > 100 + assert content.startswith(b"%PDF") + + temp_file.unlink(missing_ok=True) + + def test_download_nonexistent_file(self, file_client): + with pytest.raises(HTTPError) as exc: + file_client.download_file(9999999) + assert exc.value.status_code == 404 + class TestJobs: def test_create_and_poll_job( From ddaffd3c81defa69a6e1ade8f847c60181ebc6e1 Mon Sep 17 00:00:00 2001 From: asobolev Date: Wed, 3 Sep 2025 17:25:42 +0200 Subject: [PATCH 19/37] added invalid format upload, clear search and sorting tests --- .../tests/test_base_api.py | 65 ++++++++++++++++++- 1 file changed, 64 insertions(+), 1 deletion(-) diff --git a/test_automation_framework/tests/test_base_api.py b/test_automation_framework/tests/test_base_api.py index b4a2f9b0d..e86a14d3f 100644 --- a/test_automation_framework/tests/test_base_api.py +++ b/test_automation_framework/tests/test_base_api.py @@ -64,7 +64,6 @@ def test_clear_search_for_datasets(self, dataset_client): assert isinstance(dataset["id"], int) assert isinstance(dataset["name"], str) assert isinstance(dataset["count"], int) - datetime.fromisoformat(dataset["created"]) def test_search_sorting(self, dataset_client): result = dataset_client.search(sorting=[{"direction": "desc", "field": "name"}]) @@ -176,6 +175,15 @@ def test_upload_and_delete_file(self, file_tracker, tmp_path): if temp_file.exists(): temp_file.unlink() + def test_upload_invalid_format(self, file_client, tmp_path): + invalid_file = tmp_path / f"{uuid.uuid4().hex}.py" + invalid_file.write_text("this is py file") + + with 
pytest.raises(HTTPError) as exc: + file_client.upload_file(str(invalid_file)) + + assert exc.value.status_code == 400 + @pytest.mark.skip(reason="Uploads a file, but returns 500") @pytest.mark.parametrize("content", ["", " "]) def test_upload_empty_file(self, file_client, tmp_path, content): @@ -224,6 +232,35 @@ def test_move_file(self, file_tracker, dataset_tracker, tmp_path): if temp_file.exists(): temp_file.unlink() + def test_clear_search_files(self, file_tracker, tmp_path): + created_files, client = file_tracker + result = client.search_files() + assert "pagination" in result + assert "data" in result + assert isinstance(result["data"], list) + pagination = result["pagination"] + required_pagination_keys = {"page_num", "page_offset", "page_size", "min_pages_left", "total", "has_more"} + assert required_pagination_keys <= pagination.keys() + for file in result["data"]: + required_file_keys = { + "id", + "original_name", + "bucket", + "size_in_bytes", + "extension", + "original_ext", + "content_type", + "pages", + "last_modified", + "status", + "path", + "datasets", + } + assert required_file_keys <= file.keys() + assert isinstance(file["id"], int) + assert isinstance(file["original_name"], str) + assert isinstance(file["size_in_bytes"], int) + def test_search_existing_file(self, file_tracker, tmp_path): created_files, client = file_tracker try: @@ -274,6 +311,32 @@ def test_download_nonexistent_file(self, file_client): file_client.download_file(9999999) assert exc.value.status_code == 404 + @pytest.mark.parametrize("field", ["original_name", "last_modified", "size_in_bytes"]) + @pytest.mark.parametrize("direction", ["asc", "desc"]) + # name descending fails + def test_files_sorting(self, file_client, field, direction): + resp = file_client.post_json( + "/assets/files/search", + json={ + "pagination": {"page_num": 1, "page_size": 15}, + "filters": [{"field": "original_name", "operator": "ilike", "value": "%%"}], + "sorting": [{"direction": direction, "field": 
field}], + }, + headers=file_client._default_headers(content_type_json=True), + ) + + data = resp["data"] + values = [d[field] for d in data if field in d] + + if field == "last_modified": + values = [datetime.fromisoformat(v) for v in values] + + if field == "size_in_bytes": + values = [int(v) for v in values] + + expected = sorted(values, reverse=(direction == "desc")) + assert values == expected, f"{field} not sorted {direction}" + class TestJobs: def test_create_and_poll_job( From 3678f205b8f00f4c4aa150d4d3a73da7eafb3e42 Mon Sep 17 00:00:00 2001 From: asobolev Date: Wed, 3 Sep 2025 17:37:17 +0200 Subject: [PATCH 20/37] reformatted tests structure --- test_automation_framework/tests/test_auth.py | 26 + .../tests/test_base_api.py | 551 ------------------ .../tests/test_categories.py | 25 + .../tests/test_datasets.py | 112 ++++ test_automation_framework/tests/test_files.py | 196 +++++++ test_automation_framework/tests/test_jobs.py | 120 ++++ test_automation_framework/tests/test_other.py | 24 + .../tests/test_plugins.py | 58 ++ .../tests/test_reports.py | 36 ++ 9 files changed, 597 insertions(+), 551 deletions(-) create mode 100644 test_automation_framework/tests/test_auth.py delete mode 100644 test_automation_framework/tests/test_base_api.py create mode 100644 test_automation_framework/tests/test_categories.py create mode 100644 test_automation_framework/tests/test_datasets.py create mode 100644 test_automation_framework/tests/test_files.py create mode 100644 test_automation_framework/tests/test_jobs.py create mode 100644 test_automation_framework/tests/test_other.py create mode 100644 test_automation_framework/tests/test_plugins.py create mode 100644 test_automation_framework/tests/test_reports.py diff --git a/test_automation_framework/tests/test_auth.py b/test_automation_framework/tests/test_auth.py new file mode 100644 index 000000000..a8601873e --- /dev/null +++ b/test_automation_framework/tests/test_auth.py @@ -0,0 +1,26 @@ +from logging import getLogger + 
+import pytest + + +from helpers.base_client.base_client import HTTPError + +logger = getLogger(__name__) + + +class TestAuthAPI: + def test_basic_auth(self, auth_token): + access_token, refresh_token = auth_token + assert access_token + assert refresh_token + + def test_wrong_creds(self, auth_service): + with pytest.raises(HTTPError) as exc: + auth_service.get_token("wrong", "wrong") + assert exc.value.status_code == 401 + + def test_refresh_token(self, auth_token, auth_service): + access_token, refresh_token = auth_token + new_access, new_refresh = auth_service.refresh_token(refresh_token=refresh_token) + assert new_access != access_token + assert new_refresh != refresh_token diff --git a/test_automation_framework/tests/test_base_api.py b/test_automation_framework/tests/test_base_api.py deleted file mode 100644 index e86a14d3f..000000000 --- a/test_automation_framework/tests/test_base_api.py +++ /dev/null @@ -1,551 +0,0 @@ -from logging import getLogger -from datetime import datetime, timedelta -import uuid - -import pytest - - -from helpers.base_client.base_client import HTTPError - -logger = getLogger(__name__) - - -class TestAuthAPI: - def test_basic_auth(self, auth_token): - access_token, refresh_token = auth_token - assert access_token - assert refresh_token - - def test_wrong_creds(self, auth_service): - with pytest.raises(HTTPError) as exc: - auth_service.get_token("wrong", "wrong") - assert exc.value.status_code == 401 - - def test_refresh_token(self, auth_token, auth_service): - access_token, refresh_token = auth_token - new_access, new_refresh = auth_service.refresh_token(refresh_token=refresh_token) - assert new_access != access_token - assert new_refresh != refresh_token - - -class TestAPI: - def test_menu(self, menu_client): - menu = menu_client.get_menu() - assert isinstance(menu, list) - assert menu - required_keys = {"name", "badgerdoc_path", "is_external", "is_iframe", "url", "children"} - for item in menu: - assert required_keys <= item.keys() - 
first_item = menu[0] - assert isinstance(first_item["name"], str) - assert isinstance(first_item["badgerdoc_path"], str) - assert isinstance(first_item["is_external"], bool) - assert isinstance(first_item["children"], (list, type(None))) - expected_names = {"Documents", "My Tasks", "Jobs", "Settings"} - actual_names = {item["name"] for item in menu} - assert expected_names <= actual_names - settings_item = next(i for i in menu if i["name"] == "Settings") - assert isinstance(settings_item["children"], list) - assert any(child["name"] == "Keycloak" for child in settings_item["children"]) - - -class TestDatasets: - def test_clear_search_for_datasets(self, dataset_client): - result = dataset_client.search() - assert "pagination" in result - assert "data" in result - assert isinstance(result["data"], list) - pagination = result["pagination"] - required_pagination_keys = {"page_num", "page_offset", "page_size", "min_pages_left", "total", "has_more"} - assert required_pagination_keys <= pagination.keys() - for dataset in result["data"]: - required_dataset_keys = {"id", "name", "count", "created"} - assert required_dataset_keys <= dataset.keys() - assert isinstance(dataset["id"], int) - assert isinstance(dataset["name"], str) - assert isinstance(dataset["count"], int) - - def test_search_sorting(self, dataset_client): - result = dataset_client.search(sorting=[{"direction": "desc", "field": "name"}]) - names = [d["name"] for d in result["data"]] - assert names == sorted(names, reverse=True) - - def test_search_pagination(self, dataset_client): - result = dataset_client.search(page_num=1, page_size=15) - assert len(result["data"]) <= 15 - assert result["pagination"]["page_num"] == 1 - - def test_selection(self, dataset_client): - datasets = dataset_client.search()["data"] - assert datasets - dataset_id = datasets[0]["id"] - files_selected = dataset_client.search_files(dataset_id=dataset_id)["data"] - assert isinstance(files_selected, list) - for f in files_selected: - assert 
any(d["id"] == dataset_id for d in f.get("datasets", [])) - files_all = dataset_client.search_files()["data"] - assert isinstance(files_all, list) - has_dataset = any(f.get("datasets") for f in files_all) - has_no_dataset = any(not f.get("datasets") for f in files_all) - assert has_dataset or has_no_dataset - - def test_create_and_delete_dataset(self, dataset_client): - dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" - create_resp = dataset_client.create_dataset(name=dataset_name) - assert "detail" in create_resp - assert "successfully created" in create_resp["detail"].lower() - search_resp = dataset_client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}]) - assert any(d["name"] == dataset_name for d in search_resp["data"]) - delete_resp = dataset_client.delete_dataset(name=dataset_name) - assert "detail" in delete_resp - assert "successfully deleted" in delete_resp["detail"].lower() - search_after = dataset_client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}]) - assert all(d["name"] != dataset_name for d in search_after["data"]) - - @pytest.mark.skip(reason="Successfully creates dataset") - def test_create_dataset_with_empty_name(self, dataset_tracker): - created, client = dataset_tracker - - with pytest.raises(HTTPError) as e: - client.create_dataset(name="") - - assert e.value.status_code in (400, 422) - - def test_create_duplicate_dataset(self, dataset_tracker): - created, client = dataset_tracker - dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" - resp = client.create_dataset(name=dataset_name) - created.append(dataset_name) - assert "successfully created" in resp["detail"].lower() - with pytest.raises(HTTPError) as exc: - client.create_dataset(name=dataset_name) - assert exc.value.status_code == 400 - assert "already exists" in exc.value.body.lower() - - def test_search_existing_dataset(self, dataset_tracker): - created, client = dataset_tracker - dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" - 
resp = client.create_dataset(name=dataset_name) - created.append(dataset_name) - assert "successfully created" in resp["detail"].lower() - - search_resp = client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}]) - names = [d["name"] for d in search_resp["data"]] - assert dataset_name in names - - def test_search_non_existing_dataset(self, dataset_client): - search_resp = dataset_client.search( - filters=[{"field": "name", "operator": "eq", "value": "non_existing_dataset"}] - ) - assert search_resp["data"] == [] - - def test_search_multiple_existing_datasets(self, dataset_tracker): - created, client = dataset_tracker - names = [f"autotest_{uuid.uuid4().hex[:8]}" for _ in range(2)] - for n in names: - resp = client.create_dataset(name=n) - created.append(n) - assert "successfully created" in resp["detail"].lower() - - search_resp = client.search(filters=[{"field": "name", "operator": "in", "value": names}]) - found_names = {d["name"] for d in search_resp["data"]} - assert set(names) <= found_names - - -class TestFiles: - def test_upload_and_delete_file(self, file_tracker, tmp_path): - created_files, client = file_tracker - try: - file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) - assert file_info["status"] is True - assert "id" in file_info - assert "file_name" in file_info - created_files.append(file_info) - search = client.search_files() - ids = [f["id"] for f in search["data"]] - assert file_info["id"] in ids - delete_result = client.delete_files([file_info["id"]]) - assert delete_result[0]["status"] is True - assert delete_result[0]["action"] == "delete" - search_after = client.search_files() - ids_after = [f["id"] for f in search_after["data"]] - assert file_info["id"] not in ids_after - created_files.clear() - finally: - if temp_file.exists(): - temp_file.unlink() - - def test_upload_invalid_format(self, file_client, tmp_path): - invalid_file = tmp_path / f"{uuid.uuid4().hex}.py" - 
invalid_file.write_text("this is py file") - - with pytest.raises(HTTPError) as exc: - file_client.upload_file(str(invalid_file)) - - assert exc.value.status_code == 400 - - @pytest.mark.skip(reason="Uploads a file, but returns 500") - @pytest.mark.parametrize("content", ["", " "]) - def test_upload_empty_file(self, file_client, tmp_path, content): - empty_file = tmp_path / f"{uuid.uuid4().hex}_empty.pdf" - empty_file.write_text(content) - with pytest.raises(HTTPError) as exc: - file_client.upload_file(str(empty_file)) - assert exc.value.status_code == 400 - - def test_move_file(self, file_tracker, dataset_tracker, tmp_path): - created_datasets, dataset_client = dataset_tracker - - first_dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" - second_dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" - - first_resp = dataset_client.create_dataset(name=first_dataset_name) - created_datasets.append(first_dataset_name) - assert "successfully created" in first_resp["detail"].lower() - first_dataset_id = dataset_client.search( - filters=[{"field": "name", "operator": "eq", "value": first_dataset_name}] - )["data"][0]["id"] - - second_resp = dataset_client.create_dataset(name=second_dataset_name) - created_datasets.append(second_dataset_name) - assert "successfully created" in second_resp["detail"].lower() - second_dataset_id = dataset_client.search( - filters=[{"field": "name", "operator": "eq", "value": second_dataset_name}] - )["data"][0]["id"] - - created_files, client = file_tracker - file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) - created_files.append(file_info) - file_id = file_info["id"] - try: - move1 = client.move_files(name=first_dataset_name, objects=[file_id])[0] - assert move1["status"] is True - assert "successfully bounded" in move1["message"].lower() - files_in_first = dataset_client.search_files(dataset_id=first_dataset_id)["data"] - assert any(f["id"] == file_id for f in files_in_first) - move2 = 
client.move_files(name=second_dataset_name, objects=[file_id])[0] - assert move2["status"] is True - assert "successfully bounded" in move2["message"].lower() - files_in_second = dataset_client.search_files(dataset_id=second_dataset_id)["data"] - assert any(f["id"] == file_id for f in files_in_second) - finally: - if temp_file.exists(): - temp_file.unlink() - - def test_clear_search_files(self, file_tracker, tmp_path): - created_files, client = file_tracker - result = client.search_files() - assert "pagination" in result - assert "data" in result - assert isinstance(result["data"], list) - pagination = result["pagination"] - required_pagination_keys = {"page_num", "page_offset", "page_size", "min_pages_left", "total", "has_more"} - assert required_pagination_keys <= pagination.keys() - for file in result["data"]: - required_file_keys = { - "id", - "original_name", - "bucket", - "size_in_bytes", - "extension", - "original_ext", - "content_type", - "pages", - "last_modified", - "status", - "path", - "datasets", - } - assert required_file_keys <= file.keys() - assert isinstance(file["id"], int) - assert isinstance(file["original_name"], str) - assert isinstance(file["size_in_bytes"], int) - - def test_search_existing_file(self, file_tracker, tmp_path): - created_files, client = file_tracker - try: - file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) - assert file_info["status"] is True - search_resp = client.search_files( - filters=[{"field": "original_name", "operator": "eq", "value": file_info["file_name"]}] - ) - names = [f["original_name"] for f in search_resp["data"]] - assert file_info["file_name"] in names - finally: - if temp_file.exists(): - temp_file.unlink() - - def test_search_non_existing_file(self, file_client): - search_resp = file_client.search_files( - filters=[{"field": "original_name", "operator": "eq", "value": "definitely_not_a_file.pdf"}] - ) - assert search_resp["data"] == [] - - def 
test_search_multiple_existing_files(self, file_tracker, tmp_path): - created_files, client = file_tracker - f1, t1 = client.upload_temp_file(client, file_tracker, tmp_path) - f2, t2 = client.upload_temp_file(client, file_tracker, tmp_path) - names = [f1["file_name"], f2["file_name"]] - - search = client.search_files(filters=[{"field": "original_name", "operator": "in", "value": names}]) - found_names = {f["original_name"] for f in search["data"]} - assert set(names) <= found_names - - t1.unlink(missing_ok=True) - t2.unlink(missing_ok=True) - - def test_download_existing_file(self, file_tracker, tmp_path): - created_files, client = file_tracker - file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) - file_id = file_info["id"] - - content = client.download_file(file_id) - assert isinstance(content, (bytes, bytearray)) - assert len(content) > 100 - assert content.startswith(b"%PDF") - - temp_file.unlink(missing_ok=True) - - def test_download_nonexistent_file(self, file_client): - with pytest.raises(HTTPError) as exc: - file_client.download_file(9999999) - assert exc.value.status_code == 404 - - @pytest.mark.parametrize("field", ["original_name", "last_modified", "size_in_bytes"]) - @pytest.mark.parametrize("direction", ["asc", "desc"]) - # name descending fails - def test_files_sorting(self, file_client, field, direction): - resp = file_client.post_json( - "/assets/files/search", - json={ - "pagination": {"page_num": 1, "page_size": 15}, - "filters": [{"field": "original_name", "operator": "ilike", "value": "%%"}], - "sorting": [{"direction": direction, "field": field}], - }, - headers=file_client._default_headers(content_type_json=True), - ) - - data = resp["data"] - values = [d[field] for d in data if field in d] - - if field == "last_modified": - values = [datetime.fromisoformat(v) for v in values] - - if field == "size_in_bytes": - values = [int(v) for v in values] - - expected = sorted(values, reverse=(direction == "desc")) - assert 
values == expected, f"{field} not sorted {direction}" - - -class TestJobs: - def test_create_and_poll_job( - self, file_client, jobs_client, file_tracker, dataset_tracker, job_tracker, tmp_path, user_uuid - ): - created_files, client = file_tracker - file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) - created_datasets, dataset_client = dataset_tracker - - dataset_name = f"autotest_ds_{uuid.uuid4().hex[:8]}" - dataset_client.create_dataset(name=dataset_name) - created_datasets.append(dataset_name) - move_resp = file_client.move_files(name=dataset_name, objects=[file_info["id"]])[0] - assert move_resp["status"] is True - job_name = f"test_job_{uuid.uuid4().hex[:8]}" - create_resp = jobs_client.create_job( - name=job_name, - file_ids=[file_info["id"]], - owners=[user_uuid], - ) - job_tracker[0].append(create_resp) - job_id = create_resp.get("id") - assert job_id - final_job = jobs_client.poll_until_finished(job_id=job_id, timeout_seconds=300) - status = final_job.get("status") - assert str(status).lower() in {"finished", "success", "completed"} - job_files = final_job.get("files") or [] - assert file_info["id"] in job_files - - @pytest.mark.parametrize("field", ["name", "type", "status", "deadline", "creation_datetime"]) - @pytest.mark.parametrize("direction", ["asc", "desc"]) - # descending name sorting works weird - def test_sorting(self, jobs_client, field, direction): - resp = jobs_client.post_json( - "/jobs/jobs/search", - json={ - "pagination": {"page_num": 1, "page_size": 15}, - "filters": [], - "sorting": [{"direction": direction, "field": field}], - }, - headers=jobs_client._default_headers(content_type_json=True), - ) - data = resp["data"] - values = [d[field] for d in data if field in d and d[field] is not None] - - if field in {"creation_datetime", "deadline"}: - values = [datetime.fromisoformat(v) for v in values] - - expected = sorted(values, reverse=(direction == "desc")) - assert values == expected - - 
@pytest.mark.parametrize("field", ["name", "type", "status", "deadline", "creation_datetime"]) - def test_job_search(self, jobs_client, job_tracker, file_tracker, dataset_tracker, user_uuid, tmp_path, field): - created_files, client = file_tracker - file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) - created_datasets, dataset_client = dataset_tracker - - dataset_name = f"autotest_ds_{uuid.uuid4().hex[:8]}" - dataset_client.create_dataset(name=dataset_name) - created_datasets.append(dataset_name) - - job_name = f"test_job_{uuid.uuid4().hex[:8]}" - create_resp = jobs_client.create_job( - name=job_name, - file_ids=[file_info["id"]], - owners=[user_uuid], - ) - job_id = create_resp.get("id") - jobs_client.poll_until_finished(job_id=job_id, timeout_seconds=300) - job_tracker[0].append(create_resp) - search_value = create_resp.get(field, None) - - filters = [ - {"field": field, "operator": "eq", "value": search_value}, - {"field": "name", "operator": "eq", "value": job_name}, - ] - - search_resp = jobs_client.post_json( - "/jobs/jobs/search", - json={ - "pagination": {"page_num": 1, "page_size": 100}, - "filters": filters, - }, - headers=jobs_client._default_headers(content_type_json=True), - ) - - job_ids = [j["id"] for j in search_resp["data"]] - assert job_id in job_ids - - @pytest.mark.parametrize("field", ["creation_datetime", "deadline"]) - def test_date_range_filter(self, jobs_client, field): - start = (datetime.utcnow() - timedelta(days=365)).replace(microsecond=0).isoformat() - end = (datetime.utcnow() + timedelta(days=365)).replace(microsecond=0).isoformat() - - resp = jobs_client.post_json( - "/jobs/jobs/search", - json={ - "pagination": {"page_num": 1, "page_size": 15}, - "filters": [ - {"field": field, "operator": "ge", "value": start}, - {"field": field, "operator": "le", "value": end}, - ], - }, - headers=jobs_client._default_headers(content_type_json=True), - ) - - data = resp["data"] - for job in data: - if field in job and 
job[field] is not None: - date_val = datetime.fromisoformat(job[field]) - assert datetime.fromisoformat(start) <= date_val <= datetime.fromisoformat(end) - - -class TestCategories: - @pytest.mark.skip(reason="Creation works, but deletion not implemented, will be cluttered by multiple runs") - def test_create_and_delete_category(self, auth_token, settings, tenant, categories_client): - access_token, _ = auth_token - - unique_id = f"test_cat_{uuid.uuid4().hex[:6]}" - created = categories_client.create_category(category_id=unique_id, name=unique_id, parent="example") - assert created.id == unique_id - search_result = categories_client.search_categories(page_size=100) - ids = [c.id for c in search_result.data] - assert unique_id in ids, f"Category {unique_id} not found after creation" - - deleted = categories_client.delete_category(unique_id) - assert deleted.get("detail") or deleted.get("status") or "success" in str(deleted).lower() - search_after_delete = categories_client.search_categories(page_size=100) - ids_after = [c.id for c in search_after_delete.data] - assert unique_id not in ids_after, f"Category {unique_id} still present after deletion" - - -class TestReports: - def test_export_tasks_csv(self, reports_client, user_uuid): - csv_text = reports_client.export_tasks( - user_ids=[user_uuid], - date_from="2025-05-01 00:00:00", - date_to="2025-08-31 00:00:00", - ) - assert "annotator_id" in csv_text - assert "task_id" in csv_text - - @pytest.mark.parametrize( - "date_from,date_to", - [ - ("2028-05-01 00:00:00", "2028-08-31 00:00:00"), - ("1900-01-01 00:00:00", "1900-12-31 00:00:00"), - ("2025-09-01 00:00:00", "2025-08-01 00:00:00"), - ], - ) - def test_export_tasks_wrong_date(self, reports_client, user_uuid, date_from, date_to): - with pytest.raises(HTTPError) as exc: - reports_client.export_tasks( - user_ids=[user_uuid], - date_from=date_from, - date_to=date_to, - ) - assert exc.value.status_code == 406 - - -class TestPlugins: - def 
test_create_and_delete_plugin(self, plugins_tracker): - created, plugins_client = plugins_tracker - unique_name = f"plugin_{uuid.uuid4().hex[:8]}" - resp = plugins_client.create_plugin( - name=unique_name, - menu_name=unique_name, - description="bar", - version="1", - url="http://what.com/what", - is_iframe=True, - ) - plugin_id = resp["id"] - created.append(plugin_id) - - plugins = plugins_client.get_plugins() - assert any(p["id"] == plugin_id for p in plugins) - assert any(p["name"] == unique_name for p in plugins) - - plugins_client.delete_plugin(plugin_id) - - plugins = plugins_client.get_plugins() - assert not any(p["id"] == plugin_id for p in plugins) - - def test_update_plugin(self, plugins_tracker): - created, plugins_client = plugins_tracker - unique_name = f"plugin_{uuid.uuid4().hex[:8]}" - resp = plugins_client.create_plugin( - name=unique_name, - menu_name=unique_name, - description="bar", - version="1", - url="http://what.com/what", - is_iframe=True, - ) - plugin_id = resp["id"] - created.append(plugin_id) - - updated_payload = { - "name": unique_name, - "menu_name": unique_name, - "description": "updated desc", - "version": "1", - "url": "http://what.com/what", - "is_iframe": True, - } - update_resp = plugins_client.update_plugin(plugin_id, **updated_payload) - assert update_resp["description"] == "updated desc" - - plugins = plugins_client.get_plugins() - updated = next(p for p in plugins if p["id"] == plugin_id) - assert updated["description"] == "updated desc" diff --git a/test_automation_framework/tests/test_categories.py b/test_automation_framework/tests/test_categories.py new file mode 100644 index 000000000..5d6f73cbf --- /dev/null +++ b/test_automation_framework/tests/test_categories.py @@ -0,0 +1,25 @@ +from logging import getLogger +import uuid + +import pytest + +logger = getLogger(__name__) + + +class TestCategories: + @pytest.mark.skip(reason="Creation works, but deletion not implemented, will be cluttered by multiple runs") + def 
test_create_and_delete_category(self, auth_token, settings, tenant, categories_client): + access_token, _ = auth_token + + unique_id = f"test_cat_{uuid.uuid4().hex[:6]}" + created = categories_client.create_category(category_id=unique_id, name=unique_id, parent="example") + assert created.id == unique_id + search_result = categories_client.search_categories(page_size=100) + ids = [c.id for c in search_result.data] + assert unique_id in ids, f"Category {unique_id} not found after creation" + + deleted = categories_client.delete_category(unique_id) + assert deleted.get("detail") or deleted.get("status") or "success" in str(deleted).lower() + search_after_delete = categories_client.search_categories(page_size=100) + ids_after = [c.id for c in search_after_delete.data] + assert unique_id not in ids_after, f"Category {unique_id} still present after deletion" diff --git a/test_automation_framework/tests/test_datasets.py b/test_automation_framework/tests/test_datasets.py new file mode 100644 index 000000000..c730e511d --- /dev/null +++ b/test_automation_framework/tests/test_datasets.py @@ -0,0 +1,112 @@ +from logging import getLogger +import uuid + +import pytest + + +from helpers.base_client.base_client import HTTPError + +logger = getLogger(__name__) + + +class TestDatasets: + def test_clear_search_for_datasets(self, dataset_client): + result = dataset_client.search() + assert "pagination" in result + assert "data" in result + assert isinstance(result["data"], list) + pagination = result["pagination"] + required_pagination_keys = {"page_num", "page_offset", "page_size", "min_pages_left", "total", "has_more"} + assert required_pagination_keys <= pagination.keys() + for dataset in result["data"]: + required_dataset_keys = {"id", "name", "count", "created"} + assert required_dataset_keys <= dataset.keys() + assert isinstance(dataset["id"], int) + assert isinstance(dataset["name"], str) + assert isinstance(dataset["count"], int) + + def test_search_sorting(self, 
dataset_client): + result = dataset_client.search(sorting=[{"direction": "desc", "field": "name"}]) + names = [d["name"] for d in result["data"]] + assert names == sorted(names, reverse=True) + + def test_search_pagination(self, dataset_client): + result = dataset_client.search(page_num=1, page_size=15) + assert len(result["data"]) <= 15 + assert result["pagination"]["page_num"] == 1 + + def test_selection(self, dataset_client): + datasets = dataset_client.search()["data"] + assert datasets + dataset_id = datasets[0]["id"] + files_selected = dataset_client.search_files(dataset_id=dataset_id)["data"] + assert isinstance(files_selected, list) + for f in files_selected: + assert any(d["id"] == dataset_id for d in f.get("datasets", [])) + files_all = dataset_client.search_files()["data"] + assert isinstance(files_all, list) + has_dataset = any(f.get("datasets") for f in files_all) + has_no_dataset = any(not f.get("datasets") for f in files_all) + assert has_dataset or has_no_dataset + + def test_create_and_delete_dataset(self, dataset_client): + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + create_resp = dataset_client.create_dataset(name=dataset_name) + assert "detail" in create_resp + assert "successfully created" in create_resp["detail"].lower() + search_resp = dataset_client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}]) + assert any(d["name"] == dataset_name for d in search_resp["data"]) + delete_resp = dataset_client.delete_dataset(name=dataset_name) + assert "detail" in delete_resp + assert "successfully deleted" in delete_resp["detail"].lower() + search_after = dataset_client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}]) + assert all(d["name"] != dataset_name for d in search_after["data"]) + + @pytest.mark.skip(reason="Successfully creates dataset") + def test_create_dataset_with_empty_name(self, dataset_tracker): + created, client = dataset_tracker + + with pytest.raises(HTTPError) as e: + 
client.create_dataset(name="") + + assert e.value.status_code in (400, 422) + + def test_create_duplicate_dataset(self, dataset_tracker): + created, client = dataset_tracker + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + resp = client.create_dataset(name=dataset_name) + created.append(dataset_name) + assert "successfully created" in resp["detail"].lower() + with pytest.raises(HTTPError) as exc: + client.create_dataset(name=dataset_name) + assert exc.value.status_code == 400 + assert "already exists" in exc.value.body.lower() + + def test_search_existing_dataset(self, dataset_tracker): + created, client = dataset_tracker + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + resp = client.create_dataset(name=dataset_name) + created.append(dataset_name) + assert "successfully created" in resp["detail"].lower() + + search_resp = client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}]) + names = [d["name"] for d in search_resp["data"]] + assert dataset_name in names + + def test_search_non_existing_dataset(self, dataset_client): + search_resp = dataset_client.search( + filters=[{"field": "name", "operator": "eq", "value": "non_existing_dataset"}] + ) + assert search_resp["data"] == [] + + def test_search_multiple_existing_datasets(self, dataset_tracker): + created, client = dataset_tracker + names = [f"autotest_{uuid.uuid4().hex[:8]}" for _ in range(2)] + for n in names: + resp = client.create_dataset(name=n) + created.append(n) + assert "successfully created" in resp["detail"].lower() + + search_resp = client.search(filters=[{"field": "name", "operator": "in", "value": names}]) + found_names = {d["name"] for d in search_resp["data"]} + assert set(names) <= found_names diff --git a/test_automation_framework/tests/test_files.py b/test_automation_framework/tests/test_files.py new file mode 100644 index 000000000..b815be05f --- /dev/null +++ b/test_automation_framework/tests/test_files.py @@ -0,0 +1,196 @@ +from logging import getLogger 
+from datetime import datetime +import uuid + +import pytest + + +from helpers.base_client.base_client import HTTPError + +logger = getLogger(__name__) + + +class TestFiles: + def test_upload_and_delete_file(self, file_tracker, tmp_path): + created_files, client = file_tracker + try: + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + assert file_info["status"] is True + assert "id" in file_info + assert "file_name" in file_info + created_files.append(file_info) + search = client.search_files() + ids = [f["id"] for f in search["data"]] + assert file_info["id"] in ids + delete_result = client.delete_files([file_info["id"]]) + assert delete_result[0]["status"] is True + assert delete_result[0]["action"] == "delete" + search_after = client.search_files() + ids_after = [f["id"] for f in search_after["data"]] + assert file_info["id"] not in ids_after + created_files.clear() + finally: + if temp_file.exists(): + temp_file.unlink() + + def test_upload_invalid_format(self, file_client, tmp_path): + invalid_file = tmp_path / f"{uuid.uuid4().hex}.py" + invalid_file.write_text("this is py file") + + with pytest.raises(HTTPError) as exc: + file_client.upload_file(str(invalid_file)) + + assert exc.value.status_code == 400 + + @pytest.mark.skip(reason="Uploads a file, but returns 500") + @pytest.mark.parametrize("content", ["", " "]) + def test_upload_empty_file(self, file_client, tmp_path, content): + empty_file = tmp_path / f"{uuid.uuid4().hex}_empty.pdf" + empty_file.write_text(content) + with pytest.raises(HTTPError) as exc: + file_client.upload_file(str(empty_file)) + assert exc.value.status_code == 400 + + def test_move_file(self, file_tracker, dataset_tracker, tmp_path): + created_datasets, dataset_client = dataset_tracker + + first_dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + second_dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + + first_resp = dataset_client.create_dataset(name=first_dataset_name) + 
created_datasets.append(first_dataset_name) + assert "successfully created" in first_resp["detail"].lower() + first_dataset_id = dataset_client.search( + filters=[{"field": "name", "operator": "eq", "value": first_dataset_name}] + )["data"][0]["id"] + + second_resp = dataset_client.create_dataset(name=second_dataset_name) + created_datasets.append(second_dataset_name) + assert "successfully created" in second_resp["detail"].lower() + second_dataset_id = dataset_client.search( + filters=[{"field": "name", "operator": "eq", "value": second_dataset_name}] + )["data"][0]["id"] + + created_files, client = file_tracker + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + created_files.append(file_info) + file_id = file_info["id"] + try: + move1 = client.move_files(name=first_dataset_name, objects=[file_id])[0] + assert move1["status"] is True + assert "successfully bounded" in move1["message"].lower() + files_in_first = dataset_client.search_files(dataset_id=first_dataset_id)["data"] + assert any(f["id"] == file_id for f in files_in_first) + move2 = client.move_files(name=second_dataset_name, objects=[file_id])[0] + assert move2["status"] is True + assert "successfully bounded" in move2["message"].lower() + files_in_second = dataset_client.search_files(dataset_id=second_dataset_id)["data"] + assert any(f["id"] == file_id for f in files_in_second) + finally: + if temp_file.exists(): + temp_file.unlink() + + def test_clear_search_files(self, file_tracker, tmp_path): + created_files, client = file_tracker + result = client.search_files() + assert "pagination" in result + assert "data" in result + assert isinstance(result["data"], list) + pagination = result["pagination"] + required_pagination_keys = {"page_num", "page_offset", "page_size", "min_pages_left", "total", "has_more"} + assert required_pagination_keys <= pagination.keys() + for file in result["data"]: + required_file_keys = { + "id", + "original_name", + "bucket", + "size_in_bytes", + 
"extension", + "original_ext", + "content_type", + "pages", + "last_modified", + "status", + "path", + "datasets", + } + assert required_file_keys <= file.keys() + assert isinstance(file["id"], int) + assert isinstance(file["original_name"], str) + assert isinstance(file["size_in_bytes"], int) + + def test_search_existing_file(self, file_tracker, tmp_path): + created_files, client = file_tracker + try: + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + assert file_info["status"] is True + search_resp = client.search_files( + filters=[{"field": "original_name", "operator": "eq", "value": file_info["file_name"]}] + ) + names = [f["original_name"] for f in search_resp["data"]] + assert file_info["file_name"] in names + finally: + if temp_file.exists(): + temp_file.unlink() + + def test_search_non_existing_file(self, file_client): + search_resp = file_client.search_files( + filters=[{"field": "original_name", "operator": "eq", "value": "definitely_not_a_file.pdf"}] + ) + assert search_resp["data"] == [] + + def test_search_multiple_existing_files(self, file_tracker, tmp_path): + created_files, client = file_tracker + f1, t1 = client.upload_temp_file(client, file_tracker, tmp_path) + f2, t2 = client.upload_temp_file(client, file_tracker, tmp_path) + names = [f1["file_name"], f2["file_name"]] + + search = client.search_files(filters=[{"field": "original_name", "operator": "in", "value": names}]) + found_names = {f["original_name"] for f in search["data"]} + assert set(names) <= found_names + + t1.unlink(missing_ok=True) + t2.unlink(missing_ok=True) + + def test_download_existing_file(self, file_tracker, tmp_path): + created_files, client = file_tracker + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + file_id = file_info["id"] + + content = client.download_file(file_id) + assert isinstance(content, (bytes, bytearray)) + assert len(content) > 100 + assert content.startswith(b"%PDF") + + 
temp_file.unlink(missing_ok=True) + + def test_download_nonexistent_file(self, file_client): + with pytest.raises(HTTPError) as exc: + file_client.download_file(9999999) + assert exc.value.status_code == 404 + + @pytest.mark.parametrize("field", ["original_name", "last_modified", "size_in_bytes"]) + @pytest.mark.parametrize("direction", ["asc", "desc"]) + # name descending fails + def test_files_sorting(self, file_client, field, direction): + resp = file_client.post_json( + "/assets/files/search", + json={ + "pagination": {"page_num": 1, "page_size": 15}, + "filters": [{"field": "original_name", "operator": "ilike", "value": "%%"}], + "sorting": [{"direction": direction, "field": field}], + }, + headers=file_client._default_headers(content_type_json=True), + ) + + data = resp["data"] + values = [d[field] for d in data if field in d] + + if field == "last_modified": + values = [datetime.fromisoformat(v) for v in values] + + if field == "size_in_bytes": + values = [int(v) for v in values] + + expected = sorted(values, reverse=(direction == "desc")) + assert values == expected, f"{field} not sorted {direction}" diff --git a/test_automation_framework/tests/test_jobs.py b/test_automation_framework/tests/test_jobs.py new file mode 100644 index 000000000..03a8b559e --- /dev/null +++ b/test_automation_framework/tests/test_jobs.py @@ -0,0 +1,120 @@ +from logging import getLogger +from datetime import datetime, timedelta +import uuid + +import pytest + + +logger = getLogger(__name__) + + +class TestJobs: + def test_create_and_poll_job( + self, file_client, jobs_client, file_tracker, dataset_tracker, job_tracker, tmp_path, user_uuid + ): + created_files, client = file_tracker + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + created_datasets, dataset_client = dataset_tracker + + dataset_name = f"autotest_ds_{uuid.uuid4().hex[:8]}" + dataset_client.create_dataset(name=dataset_name) + created_datasets.append(dataset_name) + move_resp = 
file_client.move_files(name=dataset_name, objects=[file_info["id"]])[0] + assert move_resp["status"] is True + job_name = f"test_job_{uuid.uuid4().hex[:8]}" + create_resp = jobs_client.create_job( + name=job_name, + file_ids=[file_info["id"]], + owners=[user_uuid], + ) + job_tracker[0].append(create_resp) + job_id = create_resp.get("id") + assert job_id + final_job = jobs_client.poll_until_finished(job_id=job_id, timeout_seconds=300) + status = final_job.get("status") + assert str(status).lower() in {"finished", "success", "completed"} + job_files = final_job.get("files") or [] + assert file_info["id"] in job_files + + @pytest.mark.parametrize("field", ["name", "type", "status", "deadline", "creation_datetime"]) + @pytest.mark.parametrize("direction", ["asc", "desc"]) + # descending name sorting works weird + def test_sorting(self, jobs_client, field, direction): + resp = jobs_client.post_json( + "/jobs/jobs/search", + json={ + "pagination": {"page_num": 1, "page_size": 15}, + "filters": [], + "sorting": [{"direction": direction, "field": field}], + }, + headers=jobs_client._default_headers(content_type_json=True), + ) + data = resp["data"] + values = [d[field] for d in data if field in d and d[field] is not None] + + if field in {"creation_datetime", "deadline"}: + values = [datetime.fromisoformat(v) for v in values] + + expected = sorted(values, reverse=(direction == "desc")) + assert values == expected + + @pytest.mark.parametrize("field", ["name", "type", "status", "deadline", "creation_datetime"]) + def test_job_search(self, jobs_client, job_tracker, file_tracker, dataset_tracker, user_uuid, tmp_path, field): + created_files, client = file_tracker + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + created_datasets, dataset_client = dataset_tracker + + dataset_name = f"autotest_ds_{uuid.uuid4().hex[:8]}" + dataset_client.create_dataset(name=dataset_name) + created_datasets.append(dataset_name) + + job_name = 
f"test_job_{uuid.uuid4().hex[:8]}" + create_resp = jobs_client.create_job( + name=job_name, + file_ids=[file_info["id"]], + owners=[user_uuid], + ) + job_id = create_resp.get("id") + jobs_client.poll_until_finished(job_id=job_id, timeout_seconds=300) + job_tracker[0].append(create_resp) + search_value = create_resp.get(field, None) + + filters = [ + {"field": field, "operator": "eq", "value": search_value}, + {"field": "name", "operator": "eq", "value": job_name}, + ] + + search_resp = jobs_client.post_json( + "/jobs/jobs/search", + json={ + "pagination": {"page_num": 1, "page_size": 100}, + "filters": filters, + }, + headers=jobs_client._default_headers(content_type_json=True), + ) + + job_ids = [j["id"] for j in search_resp["data"]] + assert job_id in job_ids + + @pytest.mark.parametrize("field", ["creation_datetime", "deadline"]) + def test_date_range_filter(self, jobs_client, field): + start = (datetime.utcnow() - timedelta(days=365)).replace(microsecond=0).isoformat() + end = (datetime.utcnow() + timedelta(days=365)).replace(microsecond=0).isoformat() + + resp = jobs_client.post_json( + "/jobs/jobs/search", + json={ + "pagination": {"page_num": 1, "page_size": 15}, + "filters": [ + {"field": field, "operator": "ge", "value": start}, + {"field": field, "operator": "le", "value": end}, + ], + }, + headers=jobs_client._default_headers(content_type_json=True), + ) + + data = resp["data"] + for job in data: + if field in job and job[field] is not None: + date_val = datetime.fromisoformat(job[field]) + assert datetime.fromisoformat(start) <= date_val <= datetime.fromisoformat(end) diff --git a/test_automation_framework/tests/test_other.py b/test_automation_framework/tests/test_other.py new file mode 100644 index 000000000..396183370 --- /dev/null +++ b/test_automation_framework/tests/test_other.py @@ -0,0 +1,24 @@ +from logging import getLogger + +logger = getLogger(__name__) + + +class TestMenu: + def test_menu(self, menu_client): + menu = menu_client.get_menu() + 
assert isinstance(menu, list) + assert menu + required_keys = {"name", "badgerdoc_path", "is_external", "is_iframe", "url", "children"} + for item in menu: + assert required_keys <= item.keys() + first_item = menu[0] + assert isinstance(first_item["name"], str) + assert isinstance(first_item["badgerdoc_path"], str) + assert isinstance(first_item["is_external"], bool) + assert isinstance(first_item["children"], (list, type(None))) + expected_names = {"Documents", "My Tasks", "Jobs", "Settings"} + actual_names = {item["name"] for item in menu} + assert expected_names <= actual_names + settings_item = next(i for i in menu if i["name"] == "Settings") + assert isinstance(settings_item["children"], list) + assert any(child["name"] == "Keycloak" for child in settings_item["children"]) diff --git a/test_automation_framework/tests/test_plugins.py b/test_automation_framework/tests/test_plugins.py new file mode 100644 index 000000000..cf31abf83 --- /dev/null +++ b/test_automation_framework/tests/test_plugins.py @@ -0,0 +1,58 @@ +from logging import getLogger +import uuid + +logger = getLogger(__name__) + + +class TestPlugins: + def test_create_and_delete_plugin(self, plugins_tracker): + created, plugins_client = plugins_tracker + unique_name = f"plugin_{uuid.uuid4().hex[:8]}" + resp = plugins_client.create_plugin( + name=unique_name, + menu_name=unique_name, + description="bar", + version="1", + url="http://what.com/what", + is_iframe=True, + ) + plugin_id = resp["id"] + created.append(plugin_id) + + plugins = plugins_client.get_plugins() + assert any(p["id"] == plugin_id for p in plugins) + assert any(p["name"] == unique_name for p in plugins) + + plugins_client.delete_plugin(plugin_id) + + plugins = plugins_client.get_plugins() + assert not any(p["id"] == plugin_id for p in plugins) + + def test_update_plugin(self, plugins_tracker): + created, plugins_client = plugins_tracker + unique_name = f"plugin_{uuid.uuid4().hex[:8]}" + resp = plugins_client.create_plugin( + 
name=unique_name, + menu_name=unique_name, + description="bar", + version="1", + url="http://what.com/what", + is_iframe=True, + ) + plugin_id = resp["id"] + created.append(plugin_id) + + updated_payload = { + "name": unique_name, + "menu_name": unique_name, + "description": "updated desc", + "version": "1", + "url": "http://what.com/what", + "is_iframe": True, + } + update_resp = plugins_client.update_plugin(plugin_id, **updated_payload) + assert update_resp["description"] == "updated desc" + + plugins = plugins_client.get_plugins() + updated = next(p for p in plugins if p["id"] == plugin_id) + assert updated["description"] == "updated desc" diff --git a/test_automation_framework/tests/test_reports.py b/test_automation_framework/tests/test_reports.py new file mode 100644 index 000000000..fc51f96cd --- /dev/null +++ b/test_automation_framework/tests/test_reports.py @@ -0,0 +1,36 @@ +from logging import getLogger + +import pytest + + +from helpers.base_client.base_client import HTTPError + +logger = getLogger(__name__) + + +class TestReports: + def test_export_tasks_csv(self, reports_client, user_uuid): + csv_text = reports_client.export_tasks( + user_ids=[user_uuid], + date_from="2025-05-01 00:00:00", + date_to="2025-08-31 00:00:00", + ) + assert "annotator_id" in csv_text + assert "task_id" in csv_text + + @pytest.mark.parametrize( + "date_from,date_to", + [ + ("2028-05-01 00:00:00", "2028-08-31 00:00:00"), + ("1900-01-01 00:00:00", "1900-12-31 00:00:00"), + ("2025-09-01 00:00:00", "2025-08-01 00:00:00"), + ], + ) + def test_export_tasks_wrong_date(self, reports_client, user_uuid, date_from, date_to): + with pytest.raises(HTTPError) as exc: + reports_client.export_tasks( + user_ids=[user_uuid], + date_from=date_from, + date_to=date_to, + ) + assert exc.value.status_code == 406 From e79a68f8962ac5b6b810bb39fc9932d47e8cb2ec Mon Sep 17 00:00:00 2001 From: asobolev Date: Fri, 5 Sep 2025 16:39:02 +0200 Subject: [PATCH 21/37] added select all - unselect all frontend 
tests --- test_automation_framework/conftest.py | 17 +- .../auth/{auth_service.py => auth_client.py} | 2 +- test_automation_framework/pdm.lock | 162 +++++++++++++++++- test_automation_framework/pyproject.toml | 2 +- test_automation_framework/tests/test_files.py | 27 +++ .../tests/test_frontend.py | 25 +++ 6 files changed, 229 insertions(+), 6 deletions(-) rename test_automation_framework/helpers/auth/{auth_service.py => auth_client.py} (98%) create mode 100644 test_automation_framework/tests/test_frontend.py diff --git a/test_automation_framework/conftest.py b/test_automation_framework/conftest.py index d99090344..54fee5d88 100644 --- a/test_automation_framework/conftest.py +++ b/test_automation_framework/conftest.py @@ -5,7 +5,7 @@ import pytest from settings import load_settings -from helpers.auth.auth_service import AuthService +from helpers.auth.auth_client import AuthClient from helpers.base_client.base_client import BaseClient from helpers.datasets.dataset_client import DatasetClient from helpers.files.file_client import FileClient @@ -16,6 +16,8 @@ from helpers.reports.reports_client import ReportsClient from helpers.plugins.plugins_client import PluginsClient +from playwright.sync_api import Page + logger = getLogger(__name__) @@ -41,8 +43,8 @@ def base_client(settings) -> BaseClient: @pytest.fixture(scope="session") -def auth_service(base_client) -> AuthService: - return AuthService(base_client) +def auth_service(base_client) -> AuthClient: + return AuthClient(base_client) @pytest.fixture(scope="session") @@ -148,3 +150,12 @@ def plugins_tracker(plugins_client): logger.info(f"[plugins_tracker] Deleted plugin {id}") except Exception as e: logger.warning(f"[plugins_tracker] Failed to delete plugin {id}: {e}") + + +@pytest.fixture +def logged_in_page(page: Page) -> Page: + page.goto("http://demo.badgerdoc.com:8083/login") + page.get_by_role("textbox", name="Username").fill("admin") + page.get_by_role("textbox", name="Password").fill("admin") + 
page.get_by_role("button", name="Login", exact=True).click() + return page diff --git a/test_automation_framework/helpers/auth/auth_service.py b/test_automation_framework/helpers/auth/auth_client.py similarity index 98% rename from test_automation_framework/helpers/auth/auth_service.py rename to test_automation_framework/helpers/auth/auth_client.py index 89753c18c..564f841bb 100644 --- a/test_automation_framework/helpers/auth/auth_service.py +++ b/test_automation_framework/helpers/auth/auth_client.py @@ -15,7 +15,7 @@ class TokenResponse(BaseModel): expires_in: Optional[int] = None -class AuthService: +class AuthClient: def __init__(self, client: BaseClient) -> None: self.client = client diff --git a/test_automation_framework/pdm.lock b/test_automation_framework/pdm.lock index d597e7a73..87dac6fc8 100644 --- a/test_automation_framework/pdm.lock +++ b/test_automation_framework/pdm.lock @@ -5,7 +5,7 @@ groups = ["default"] strategy = ["inherit_metadata"] lock_version = "4.5.0" -content_hash = "sha256:d56b4fa3df2a34dc34169a0a8ae56c73e69c5bec37976962b9e4dfc446248a24" +content_hash = "sha256:dae4296a33b08d8e1097eca715ab509b87a4de17a1323c1c611b25724b9b5cf5" [[metadata.targets]] requires_python = "==3.13.*" @@ -63,6 +63,28 @@ files = [ {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"}, ] +[[package]] +name = "charset-normalizer" +version = "3.4.3" +requires_python = ">=3.7" +summary = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+groups = ["default"] +files = [ + {file = "charset_normalizer-3.4.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:14c2a87c65b351109f6abfc424cab3927b3bdece6f706e4d12faaf3d52ee5efe"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41d1fc408ff5fdfb910200ec0e74abc40387bccb3252f3f27c0676731df2b2c8"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1bb60174149316da1c35fa5233681f7c0f9f514509b8e399ab70fea5f17e45c9"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:30d006f98569de3459c2fc1f2acde170b7b2bd265dc1943e87e1a4efe1b67c31"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:416175faf02e4b0810f1f38bcb54682878a4af94059a1cd63b8747244420801f"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6aab0f181c486f973bc7262a97f5aca3ee7e1437011ef0c2ec04b5a11d16c927"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabf8315679312cfa71302f9bd509ded4f2f263fb5b765cf1433b39106c3cc9"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:bd28b817ea8c70215401f657edef3a8aa83c29d447fb0b622c35403780ba11d5"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:18343b2d246dc6761a249ba1fb13f9ee9a2bcd95decc767319506056ea4ad4dc"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-win32.whl", hash = "sha256:6fb70de56f1859a3f71261cbe41005f56a7842cc348d3aeb26237560bfa5e0ce"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-win_amd64.whl", hash = "sha256:cf1ebb7d78e1ad8ec2a8c4732c7be2e736f6e5123a4146c5b89c9d1f585f8cef"}, + {file = "charset_normalizer-3.4.3-py3-none-any.whl", hash = 
"sha256:ce571ab16d890d23b5c278547ba694193a45011ff86a9162a71307ed9f86759a"}, + {file = "charset_normalizer-3.4.3.tar.gz", hash = "sha256:6fce4b8500244f6fcb71465d4a4930d132ba9ab8e71a7859e6a5d59851068d14"}, +] + [[package]] name = "colorama" version = "0.4.6" @@ -108,6 +130,25 @@ files = [ {file = "filelock-3.19.1.tar.gz", hash = "sha256:66eda1888b0171c998b35be2bcc0f6d75c388a7ce20c3f3f37aa8e96c2dddf58"}, ] +[[package]] +name = "greenlet" +version = "3.2.4" +requires_python = ">=3.9" +summary = "Lightweight in-process concurrent programming" +groups = ["default"] +files = [ + {file = "greenlet-3.2.4-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:1a921e542453fe531144e91e1feedf12e07351b1cf6c9e8a3325ea600a715a31"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd3c8e693bff0fff6ba55f140bf390fa92c994083f838fece0f63be121334945"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:710638eb93b1fa52823aa91bf75326f9ecdfd5e0466f00789246a5280f4ba0fc"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c5111ccdc9c88f423426df3fd1811bfc40ed66264d35aa373420a34377efc98a"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d76383238584e9711e20ebe14db6c88ddcedc1829a9ad31a584389463b5aa504"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:23768528f2911bcd7e475210822ffb5254ed10d71f4028387e5a99b4c6699671"}, + {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:00fadb3fedccc447f517ee0d3fd8fe49eae949e1cd0f6a611818f4f6fb7dc83b"}, + {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d25c5091190f2dc0eaa3f950252122edbbadbb682aa7b1ef2f8af0f8c0afefae"}, + {file = "greenlet-3.2.4-cp313-cp313-win_amd64.whl", hash = 
"sha256:554b03b6e73aaabec3745364d6239e9e012d64c68ccd0b8430c64ccc14939a8b"}, + {file = "greenlet-3.2.4.tar.gz", hash = "sha256:0dca0d95ff849f9a364385f36ab49f50065d76964944638be9691e1832e9f86d"}, +] + [[package]] name = "h11" version = "0.16.0" @@ -217,6 +258,27 @@ files = [ {file = "platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc"}, ] +[[package]] +name = "playwright" +version = "1.55.0" +requires_python = ">=3.9" +summary = "A high-level API to automate web browsers" +groups = ["default"] +dependencies = [ + "greenlet<4.0.0,>=3.1.1", + "pyee<14,>=13", +] +files = [ + {file = "playwright-1.55.0-py3-none-macosx_10_13_x86_64.whl", hash = "sha256:d7da108a95001e412effca4f7610de79da1637ccdf670b1ae3fdc08b9694c034"}, + {file = "playwright-1.55.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:8290cf27a5d542e2682ac274da423941f879d07b001f6575a5a3a257b1d4ba1c"}, + {file = "playwright-1.55.0-py3-none-macosx_11_0_universal2.whl", hash = "sha256:25b0d6b3fd991c315cca33c802cf617d52980108ab8431e3e1d37b5de755c10e"}, + {file = "playwright-1.55.0-py3-none-manylinux1_x86_64.whl", hash = "sha256:c6d4d8f6f8c66c483b0835569c7f0caa03230820af8e500c181c93509c92d831"}, + {file = "playwright-1.55.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29a0777c4ce1273acf90c87e4ae2fe0130182100d99bcd2ae5bf486093044838"}, + {file = "playwright-1.55.0-py3-none-win32.whl", hash = "sha256:29e6d1558ad9d5b5c19cbec0a72f6a2e35e6353cd9f262e22148685b86759f90"}, + {file = "playwright-1.55.0-py3-none-win_amd64.whl", hash = "sha256:7eb5956473ca1951abb51537e6a0da55257bb2e25fc37c2b75af094a5c93736c"}, + {file = "playwright-1.55.0-py3-none-win_arm64.whl", hash = "sha256:012dc89ccdcbd774cdde8aeee14c08e0dd52ddb9135bf10e9db040527386bd76"}, +] + [[package]] name = "pluggy" version = "1.6.0" @@ -309,6 +371,20 @@ files = [ {file = "pydantic_settings-2.10.1.tar.gz", hash = 
"sha256:06f0062169818d0f5524420a360d632d5857b83cffd4d42fe29597807a1614ee"}, ] +[[package]] +name = "pyee" +version = "13.0.0" +requires_python = ">=3.8" +summary = "A rough port of Node.js's EventEmitter to Python with a few tricks of its own" +groups = ["default"] +dependencies = [ + "typing-extensions", +] +files = [ + {file = "pyee-13.0.0-py3-none-any.whl", hash = "sha256:48195a3cddb3b1515ce0695ed76036b5ccc2ef3a9f963ff9f77aec0139845498"}, + {file = "pyee-13.0.0.tar.gz", hash = "sha256:b391e3c5a434d1f5118a25615001dbc8f669cf410ab67d04c4d4e07c55481c37"}, +] + [[package]] name = "pygments" version = "2.19.2" @@ -340,6 +416,38 @@ files = [ {file = "pytest-8.4.1.tar.gz", hash = "sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c"}, ] +[[package]] +name = "pytest-base-url" +version = "2.1.0" +requires_python = ">=3.8" +summary = "pytest plugin for URL based testing" +groups = ["default"] +dependencies = [ + "pytest>=7.0.0", + "requests>=2.9", +] +files = [ + {file = "pytest_base_url-2.1.0-py3-none-any.whl", hash = "sha256:3ad15611778764d451927b2a53240c1a7a591b521ea44cebfe45849d2d2812e6"}, + {file = "pytest_base_url-2.1.0.tar.gz", hash = "sha256:02748589a54f9e63fcbe62301d6b0496da0d10231b753e950c63e03aee745d45"}, +] + +[[package]] +name = "pytest-playwright" +version = "0.7.0" +requires_python = ">=3.9" +summary = "A pytest wrapper with fixtures for Playwright to automate web browsers" +groups = ["default"] +dependencies = [ + "playwright>=1.18", + "pytest-base-url<3.0.0,>=1.0.0", + "pytest<9.0.0,>=6.2.4", + "python-slugify<9.0.0,>=6.0.0", +] +files = [ + {file = "pytest_playwright-0.7.0-py3-none-any.whl", hash = "sha256:2516d0871fa606634bfe32afbcc0342d68da2dbff97fe3459849e9c428486da2"}, + {file = "pytest_playwright-0.7.0.tar.gz", hash = "sha256:b3f2ea514bbead96d26376fac182f68dcd6571e7cb41680a89ff1673c05d60b6"}, +] + [[package]] name = "python-dotenv" version = "1.1.1" @@ -351,6 +459,20 @@ files = [ {file = "python_dotenv-1.1.1.tar.gz", hash = 
"sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab"}, ] +[[package]] +name = "python-slugify" +version = "8.0.4" +requires_python = ">=3.7" +summary = "A Python slugify application that also handles Unicode" +groups = ["default"] +dependencies = [ + "text-unidecode>=1.3", +] +files = [ + {file = "python-slugify-8.0.4.tar.gz", hash = "sha256:59202371d1d05b54a9e7720c5e038f928f45daaffe41dd10822f3907b937c856"}, + {file = "python_slugify-8.0.4-py2.py3-none-any.whl", hash = "sha256:276540b79961052b66b7d116620b36518847f52d5fd9e3a70164fc8c50faa6b8"}, +] + [[package]] name = "pyyaml" version = "6.0.2" @@ -370,6 +492,23 @@ files = [ {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, ] +[[package]] +name = "requests" +version = "2.32.5" +requires_python = ">=3.9" +summary = "Python HTTP for Humans." +groups = ["default"] +dependencies = [ + "certifi>=2017.4.17", + "charset-normalizer<4,>=2", + "idna<4,>=2.5", + "urllib3<3,>=1.21.1", +] +files = [ + {file = "requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6"}, + {file = "requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf"}, +] + [[package]] name = "sniffio" version = "1.3.1" @@ -381,6 +520,16 @@ files = [ {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, ] +[[package]] +name = "text-unidecode" +version = "1.3" +summary = "The most basic Text::Unidecode port" +groups = ["default"] +files = [ + {file = "text-unidecode-1.3.tar.gz", hash = "sha256:bad6603bb14d279193107714b288be206cac565dfa49aa5b105294dd5c4aab93"}, + {file = "text_unidecode-1.3-py2.py3-none-any.whl", hash = "sha256:1311f10e8b895935241623731c2ba64f4c455287888b18189350b67134a822e8"}, +] + [[package]] name = "typing-extensions" version = "4.14.1" @@ -406,6 +555,17 @@ files = [ {file = 
"typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28"}, ] +[[package]] +name = "urllib3" +version = "2.5.0" +requires_python = ">=3.9" +summary = "HTTP library with thread-safe connection pooling, file post, and more." +groups = ["default"] +files = [ + {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"}, + {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"}, +] + [[package]] name = "virtualenv" version = "20.34.0" diff --git a/test_automation_framework/pyproject.toml b/test_automation_framework/pyproject.toml index 67db4bda5..833a1d452 100644 --- a/test_automation_framework/pyproject.toml +++ b/test_automation_framework/pyproject.toml @@ -5,7 +5,7 @@ description = "Default template for PDM package" authors = [ {name = "asobolev", email = "aleksei_sobolev@epam.com"}, ] -dependencies = ["PyYAML==6.0.2", "dotenv==0.9.9", "httpx==0.28.1", "pre-commit==4.3.0", "pydantic-settings==2.10.1", "pydantic==2.11.7", "pytest==8.4.1"] +dependencies = ["PyYAML==6.0.2", "dotenv==0.9.9", "httpx==0.28.1", "pre-commit==4.3.0", "pydantic-settings==2.10.1", "pydantic==2.11.7", "pytest==8.4.1", "playwright==1.55.0", "pytest-playwright>=0.7.0"] requires-python = "==3.13.*" readme = "README.md" license = {text = "MIT"} diff --git a/test_automation_framework/tests/test_files.py b/test_automation_framework/tests/test_files.py index b815be05f..d25aff0b9 100644 --- a/test_automation_framework/tests/test_files.py +++ b/test_automation_framework/tests/test_files.py @@ -90,6 +90,33 @@ def test_move_file(self, file_tracker, dataset_tracker, tmp_path): if temp_file.exists(): temp_file.unlink() + def test_add_file_to_dataset_twice(self, file_tracker, dataset_tracker, tmp_path): + created_datasets, dataset_client = dataset_tracker + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + dataset = 
dataset_client.create_dataset(name=dataset_name) + created_datasets.append(dataset_name) + assert "successfully created" in dataset["detail"].lower() + first_dataset_id = dataset_client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}])[ + "data" + ][0]["id"] + + created_files, client = file_tracker + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + created_files.append(file_info) + file_id = file_info["id"] + try: + move1 = client.move_files(name=dataset_name, objects=[file_id])[0] + assert move1["status"] is True + assert "successfully bounded" in move1["message"].lower() + files_in_first = dataset_client.search_files(dataset_id=first_dataset_id)["data"] + assert any(f["id"] == file_id for f in files_in_first) + move2 = client.move_files(name=dataset_name, objects=[file_id])[0] + assert move2["status"] is False + assert "already bounded" in move2["message"].lower() + finally: + if temp_file.exists(): + temp_file.unlink() + def test_clear_search_files(self, file_tracker, tmp_path): created_files, client = file_tracker result = client.search_files() diff --git a/test_automation_framework/tests/test_frontend.py b/test_automation_framework/tests/test_frontend.py new file mode 100644 index 000000000..a94b0e2f5 --- /dev/null +++ b/test_automation_framework/tests/test_frontend.py @@ -0,0 +1,25 @@ +import pytest +from playwright.sync_api import Page, expect + + +class TestIconViewSelection: + @pytest.mark.parametrize( + "view_selector", + [ + "rect:nth-child(3)", # list view button + "path:nth-child(6)", # icon view button + ], + ) + def test_select_all_views(self, logged_in_page, view_selector): + page: Page = logged_in_page + page.locator(view_selector).click() + select_all = page.locator("label:has-text('Select All') div").first + select_all.click() + file_inputs = page.locator("div.uui-checkbox > input[type='checkbox']") + for i in range(file_inputs.count()): + expect(file_inputs.nth(i)).to_be_checked() + 
select_all = page.locator("label:has-text('selected') div").first + select_all.click() + file_inputs = page.locator("div.uui-checkbox > input[type='checkbox']") + for i in range(file_inputs.count()): + expect(file_inputs.nth(i)).not_to_be_checked() From 55f65e4c6ebd540d5fb0a035242c556d5dfeb79e Mon Sep 17 00:00:00 2001 From: asobolev Date: Fri, 5 Sep 2025 17:33:54 +0200 Subject: [PATCH 22/37] added icon view - list view frontend tests --- .../tests/test_frontend.py | 33 ++++++++++++------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/test_automation_framework/tests/test_frontend.py b/test_automation_framework/tests/test_frontend.py index a94b0e2f5..4cd69ccd0 100644 --- a/test_automation_framework/tests/test_frontend.py +++ b/test_automation_framework/tests/test_frontend.py @@ -2,24 +2,35 @@ from playwright.sync_api import Page, expect +class Locators: + list_view_button = ("rect:nth-child(3)",) + icon_view_button = ("rect:nth-child(6)",) + + class TestIconViewSelection: - @pytest.mark.parametrize( - "view_selector", - [ - "rect:nth-child(3)", # list view button - "path:nth-child(6)", # icon view button - ], - ) - def test_select_all_views(self, logged_in_page, view_selector): + @pytest.mark.parametrize("rect_index", [0, 1]) + def test_select_all_views(self, logged_in_page, rect_index): page: Page = logged_in_page - page.locator(view_selector).click() + page.locator("rect").nth(rect_index).click(force=True) select_all = page.locator("label:has-text('Select All') div").first - select_all.click() + select_all.click(force=True) file_inputs = page.locator("div.uui-checkbox > input[type='checkbox']") for i in range(file_inputs.count()): expect(file_inputs.nth(i)).to_be_checked() select_all = page.locator("label:has-text('selected') div").first - select_all.click() + select_all.click(force=True) file_inputs = page.locator("div.uui-checkbox > input[type='checkbox']") for i in range(file_inputs.count()): expect(file_inputs.nth(i)).not_to_be_checked() + + 
@pytest.mark.parametrize( + "rect_index, file_locator", + [ + (0, "a[class^='document-card-view-item_card-item']"), + (1, "div[role='cell']"), + ], + ) + def test_view_switch(self, logged_in_page: Page, rect_index: str, file_locator: str): + page = logged_in_page + page.locator("rect").nth(rect_index).click(force=True) + expect(page.locator(file_locator).first).to_be_visible() From 63a9f10a3b1d607fc7ec715f4a53c420d65895f6 Mon Sep 17 00:00:00 2001 From: asobolev Date: Mon, 8 Sep 2025 16:36:27 +0200 Subject: [PATCH 23/37] added select-unselect one-by-one tests --- .../tests/test_frontend.py | 65 ++++++++++++++++++- 1 file changed, 62 insertions(+), 3 deletions(-) diff --git a/test_automation_framework/tests/test_frontend.py b/test_automation_framework/tests/test_frontend.py index 4cd69ccd0..d028b27ba 100644 --- a/test_automation_framework/tests/test_frontend.py +++ b/test_automation_framework/tests/test_frontend.py @@ -9,8 +9,8 @@ class Locators: class TestIconViewSelection: @pytest.mark.parametrize("rect_index", [0, 1]) - def test_select_all_views(self, logged_in_page, rect_index): - page: Page = logged_in_page + def test_select_all_unselect_all_both_views(self, logged_in_page, rect_index): + page = logged_in_page page.locator("rect").nth(rect_index).click(force=True) select_all = page.locator("label:has-text('Select All') div").first select_all.click(force=True) @@ -30,7 +30,66 @@ def test_select_all_views(self, logged_in_page, rect_index): (1, "div[role='cell']"), ], ) - def test_view_switch(self, logged_in_page: Page, rect_index: str, file_locator: str): + def test_view_switch(self, logged_in_page: Page, rect_index: int, file_locator: str): page = logged_in_page page.locator("rect").nth(rect_index).click(force=True) expect(page.locator(file_locator).first).to_be_visible() + + @pytest.mark.parametrize("action", ["select", "unselect"]) + def test_select_unselect_one_by_one_icon_view(self, logged_in_page: Page, action: str): + page = logged_in_page + + 
page.locator("rect").nth(0).click(force=True) + + items = page.locator("a[class^='document-card-view-item_card-item']") + expect(items.first).to_be_visible(timeout=5000) + + inputs = items.locator("input[type='checkbox']") + + if action == "unselect": + page.locator("label:has-text('Select All') div").first.click(force=True) + expect(inputs.first).to_be_checked(timeout=5000) + + count = inputs.count() + for i in range(count): + row = items.nth(i) + row.scroll_into_view_if_needed() + + input_el = row.locator("input[type='checkbox']").first + label = row.locator("label.uui-checkbox-container") + uui_div = row.locator("div.uui-checkbox") + + click_target = label.first if label.count() else uui_div.first if uui_div.count() else input_el + click_target.click(force=True) + + if action == "select": + expect(input_el).to_be_checked() + else: + expect(input_el).not_to_be_checked() + + @pytest.mark.parametrize("action", ["select", "unselect"]) + def test_select_unselect_one_by_one_list_view(self, logged_in_page: Page, action: str): + page = logged_in_page + page.locator("rect").nth(1).click(force=True) + + rows = page.locator("div.uui-table-row-container[role='row']") + expect(rows.first).to_be_visible(timeout=5000) + + count = rows.count() + assert count > 0, "no list rows found" + + if action == "unselect": + page.locator("label:has-text('Select All') div").first.click(force=True) + expect(page.locator("div.uui-checkbox > input[type='checkbox']").first).to_be_checked(timeout=5000) + + checkboxes = page.locator("div.uui-checkbox") + count = checkboxes.count() + for i in range(2, count): + cb = checkboxes.nth(i) + cb.scroll_into_view_if_needed() + cb.click(force=True) + + if action == "select": + expect(cb).to_be_checked() + else: + expect(cb).not_to_be_checked() From d2b7b7cfa45f708bde9f554598699efb8981fd4d Mon Sep 17 00:00:00 2001 From: asobolev Date: Mon, 8 Sep 2025 17:20:37 +0200 Subject: [PATCH 24/37] added add to dataset - empty field check --- 
.../tests/test_frontend.py | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/test_automation_framework/tests/test_frontend.py b/test_automation_framework/tests/test_frontend.py index d028b27ba..bd8e53f13 100644 --- a/test_automation_framework/tests/test_frontend.py +++ b/test_automation_framework/tests/test_frontend.py @@ -93,3 +93,32 @@ def test_select_unselect_one_by_one_list_view(self, logged_in_page: Page, action expect(cb).to_be_checked() else: expect(cb).not_to_be_checked() + + @pytest.mark.parametrize("select_all", [True, False]) + def test_add_to_dataset_empty_field(self, logged_in_page: Page, select_all: bool): + page = logged_in_page + + page.locator("rect").nth(0).click(force=True) + + if select_all: + page.locator("label:has-text('Select All') div").first.click(force=True) + else: + item = page.locator("a[class^='document-card-view-item_card-item']").first + item.scroll_into_view_if_needed() + + input_el = item.locator("input[type='checkbox']").first + label = item.locator("label.uui-checkbox-container") + uui_div = item.locator("div.uui-checkbox") + + click_target = label.first if label.count() else uui_div.first if uui_div.count() else input_el + click_target.click(force=True) + expect(input_el).to_be_checked() + + add_button = page.get_by_role("button", name="Add to dataset") + add_button.click() + + choose_button = page.get_by_role("button", name="Choose") + choose_button.click() + + error_label = page.locator("div.uui-invalid-message[role='alert']") + expect(error_label).to_have_text("The field is mandatory") From b87eb6eb7d779b02d348d46fb5d88016816d3925 Mon Sep 17 00:00:00 2001 From: asobolev Date: Wed, 10 Sep 2025 17:46:46 +0200 Subject: [PATCH 25/37] added some upload wizard tests --- .../helpers/files/file_client_frontend.py | 58 +++++++ .../helpers/jobs/jobs_client.py | 11 ++ test_automation_framework/tests/test_files.py | 1 + .../tests/test_upload_wizard.py | 141 ++++++++++++++++++ 4 files changed, 211 insertions(+) 
create mode 100644 test_automation_framework/helpers/files/file_client_frontend.py create mode 100644 test_automation_framework/tests/test_upload_wizard.py diff --git a/test_automation_framework/helpers/files/file_client_frontend.py b/test_automation_framework/helpers/files/file_client_frontend.py new file mode 100644 index 000000000..b3be15e4a --- /dev/null +++ b/test_automation_framework/helpers/files/file_client_frontend.py @@ -0,0 +1,58 @@ +import uuid +import shutil +from pathlib import Path +from playwright.sync_api import Page +from logging import getLogger +import time + +logger = getLogger(__name__) + + +class FrontendFileHelper: + @staticmethod + def prepare_temp_files(tmp_path, num_files=1, suffix="pdf", base_file="multivitamin.pdf"): + data_dir = Path(__file__).parent.parent.parent / "data" + original_file = data_dir / base_file + temp_files = [] + + for _ in range(num_files): + unique_name = f"{uuid.uuid4().hex}.{suffix}" + temp_file = tmp_path / unique_name + shutil.copy(original_file, temp_file) + temp_files.append(temp_file) + + return temp_files + + @staticmethod + def upload_files( + page: Page, temp_files, file_tracker=None, client=None, base_file="multivitamin.pdf", timeout_seconds=30 + ): + page.locator("input[type='file']").set_input_files([str(f) for f in temp_files]) + + page.get_by_role("button", name="Next").click() + success_msgs = page.locator("text=Successfully uploaded, converted") + end_time = time.time() + timeout_seconds + while success_msgs.count() < len(temp_files): + if time.time() > end_time: + raise RuntimeError(f"Not all upload success messages appeared within {timeout_seconds}s") + time.sleep(0.5) + + uploaded_infos = [] + + if file_tracker is not None and client is not None: + for temp_file in temp_files: + end_time = time.time() + timeout_seconds + while time.time() < end_time: + search_resp = client.search_files( + filters=[{"field": "original_name", "operator": "eq", "value": temp_file.name}] + ) + if search_resp["data"]: 
+ file_info = search_resp["data"][0] + file_tracker.append(file_info) + uploaded_infos.append(file_info) + break + time.sleep(1) + else: + raise RuntimeError(f"Uploaded file {temp_file.name} not found in backend after {timeout_seconds}s") + + return uploaded_infos diff --git a/test_automation_framework/helpers/jobs/jobs_client.py b/test_automation_framework/helpers/jobs/jobs_client.py index 632f3d7ce..d99228002 100644 --- a/test_automation_framework/helpers/jobs/jobs_client.py +++ b/test_automation_framework/helpers/jobs/jobs_client.py @@ -61,6 +61,17 @@ def create_job( "/jobs/jobs/create_job", json=payload, headers=self._default_headers(content_type_json=True) ) + def search_jobs( + self, + page_num: int = 1, + page_size: int = 15, + ) -> dict: + payload = { + "pagination": {"page_num": page_num, "page_size": page_size}, + "sorting": [{"direction": "desc", "field": "creation_datetime"}], + } + return self.post_json("/jobs/jobs/search", json=payload, headers=self._default_headers(content_type_json=True)) + def get_job(self, job_id: int) -> Dict[str, Any]: return self.get_json(f"/jobs/jobs/{job_id}", headers=self._default_headers()) diff --git a/test_automation_framework/tests/test_files.py b/test_automation_framework/tests/test_files.py index d25aff0b9..2d654dccf 100644 --- a/test_automation_framework/tests/test_files.py +++ b/test_automation_framework/tests/test_files.py @@ -33,6 +33,7 @@ def test_upload_and_delete_file(self, file_tracker, tmp_path): if temp_file.exists(): temp_file.unlink() + @pytest.mark.skip(reason="Returns 500 instead of 4xx") def test_upload_invalid_format(self, file_client, tmp_path): invalid_file = tmp_path / f"{uuid.uuid4().hex}.py" invalid_file.write_text("this is py file") diff --git a/test_automation_framework/tests/test_upload_wizard.py b/test_automation_framework/tests/test_upload_wizard.py new file mode 100644 index 000000000..ce6048313 --- /dev/null +++ b/test_automation_framework/tests/test_upload_wizard.py @@ -0,0 +1,141 @@ 
import uuid
from logging import getLogger

import pytest
from playwright.sync_api import Page, expect

from helpers.files.file_client_frontend import FrontendFileHelper

logger = getLogger(__name__)


class TestUploadWizard:
    """UI tests driving the document Upload Wizard end to end (initial revision)."""

    def run_upload_workflow(
        self,
        page: Page,
        frontend_file_helper: FrontendFileHelper,
        num_files: int,
        file_tracker,
        client,
        jobs_client,
        dataset_type: str = "none",  # one of: "none", "existing", "new"
        dataset_name: str = None,
        tmp_path=None,
    ):
        """Walk through every wizard step and wait for the created job to finish.

        ``file_tracker`` is the (created_files, client) fixture pair; uploaded
        file infos are appended to its first element for later cleanup.
        """
        created_files = file_tracker[0]

        # Step 1: open the wizard and upload freshly prepared temp files.
        logger.info("Open wizard")
        page.get_by_role("button", name="Upload Wizard").click()

        temp_files = frontend_file_helper.prepare_temp_files(tmp_path, num_files=num_files)
        frontend_file_helper.upload_files(page, temp_files, file_tracker=created_files, client=client)

        # Step 2: dataset choice (radio group + optional dropdown/name field).
        if dataset_type == "none":
            page.locator("label:has-text('No') div").nth(1).click()
            page.get_by_role("button", name="Next").click()
        elif dataset_type == "existing":
            page.locator("label:has-text('Existing dataset') div").nth(1).click()
            page.locator(".uui-icon.uui-enabled.uui-icon-dropdown").click()
            page.get_by_text(dataset_name, exact=True).click()
            page.get_by_role("button", name="Next").click()
        elif dataset_type == "new":
            page.locator("label:has-text('New dataset') div").nth(1).click()
            page.get_by_role("textbox", name="Dataset name").fill(dataset_name)
            page.get_by_role("button", name="Next").click()
        else:
            raise ValueError(f"Unknown dataset_type {dataset_type}")

        # Step 3: skip the preprocessor.
        logger.info("Choose pipeline")
        page.get_by_text("No need for preprocessor").click()
        page.get_by_role("button", name="Next").click()

        # Step 4: unique job name + "print" pipeline from the dropdown.
        job_name = f"test_job_{uuid.uuid4().hex[:8]}"
        page.get_by_role("textbox", name="Job name").fill(job_name)
        page.locator(".uui-icon.uui-enabled.uui-icon-dropdown").click()
        page.get_by_text("print", exact=True).click()

        # Step 5: kick off extraction, then poll the backend until the job ends.
        logger.info("Start extraction")
        page.get_by_role("button", name="Start Extraction").click()
        page.wait_for_url("**/jobs/**", timeout=10000)

        jobs = jobs_client.search_jobs()
        job_id = next((j["id"] for j in jobs["data"] if j["name"] == job_name), None)
        assert job_id, f"Job with name {job_name} not found!"
        jobs_client.poll_until_finished(job_id, timeout_seconds=180)
        page.reload()
        expect(page.get_by_text("Finished")).to_be_visible(timeout=10000)

    @pytest.mark.parametrize("num_files", [1, 3])
    def test_upload_documents_without_dataset(
        self, logged_in_page: Page, file_tracker, tmp_path, jobs_client, file_client, num_files
    ):
        page = logged_in_page
        created_files, client = file_tracker
        helper = FrontendFileHelper()
        self.run_upload_workflow(
            page,
            helper,
            num_files,
            file_tracker,
            client,
            jobs_client,
            dataset_type="none",
            tmp_path=tmp_path,
        )

    @pytest.mark.parametrize("num_files", [1, 3])
    def test_upload_documents_existing_dataset(
        self, logged_in_page: Page, file_tracker, tmp_path, jobs_client, file_client, dataset_tracker, num_files
    ):
        page = logged_in_page
        created_files, client = file_tracker
        created_datasets, dataset_client = dataset_tracker

        # Pre-create the dataset through the API so the wizard can pick it.
        dataset_name = f"autotest_{uuid.uuid4().hex[:8]}"
        first_resp = dataset_client.create_dataset(name=dataset_name)
        created_datasets.append(dataset_name)
        assert "successfully created" in first_resp["detail"].lower()

        helper = FrontendFileHelper()
        self.run_upload_workflow(
            page,
            helper,
            num_files,
            file_tracker,
            client,
            jobs_client,
            dataset_type="existing",
            dataset_name=dataset_name,
            tmp_path=tmp_path,
        )

    @pytest.mark.parametrize("num_files", [1, 3])
    def test_upload_documents_new_dataset(
        self, logged_in_page: Page, file_tracker, tmp_path, jobs_client, file_client, dataset_tracker, num_files
    ):
        page = logged_in_page
        created_files, client = file_tracker
        created_datasets, dataset_client = dataset_tracker

        # Name registered for cleanup up-front; the wizard itself creates it.
        dataset_name = f"autotest_{uuid.uuid4().hex[:8]}"
        created_datasets.append(dataset_name)

        helper = FrontendFileHelper()
        self.run_upload_workflow(
            page,
            helper,
            num_files,
            file_tracker,
            client,
            jobs_client,
            dataset_type="new",
            dataset_name=dataset_name,
            tmp_path=tmp_path,
        )
import uuid
from logging import getLogger
from pathlib import Path

import pytest
from playwright.sync_api import Page, expect

from helpers.files.file_client_frontend import FrontendFileHelper

logger = getLogger(__name__)


class TestUploadWizard:
    """Upload Wizard UI tests, refactored into one helper per wizard step."""

    @staticmethod
    def select_dataset(page: Page, dataset_type: str, dataset_name: str = None):
        """Pick the dataset option on the dataset step and advance with Next.

        ``dataset_type`` must be one of "none", "existing", "new"; the latter
        two require ``dataset_name``.
        """
        logger.info(f"Select dataset option: {dataset_type}")
        if dataset_type == "none":
            page.locator("label:has-text('No') div").nth(1).click()
        elif dataset_type == "existing":
            page.locator("label:has-text('Existing dataset') div").nth(1).click()
            page.locator(".uui-icon.uui-enabled.uui-icon-dropdown").click()
            page.get_by_text(dataset_name, exact=True).click()
        elif dataset_type == "new":
            page.locator("label:has-text('New dataset') div").nth(1).click()
            page.get_by_role("textbox", name="Dataset name").fill(dataset_name)
        else:
            raise ValueError(f"Unknown dataset_type: {dataset_type}")
        page.get_by_role("button", name="Next").click()

    @staticmethod
    def select_preprocessor(page: Page, preprocessor: str = None, click_next: bool = True) -> None:
        """Pick a preprocessor ("any" picks the first real one) or skip it.

        ``click_next=False`` lets the caller stay on the step (needed when a
        language must be chosen on the same screen first).
        """
        logger.info(f"Select preprocessor: {preprocessor or 'No need'}")
        if preprocessor is None:
            page.get_by_text("No need for preprocessor").click()
        elif preprocessor == "any":
            # Climb two levels from the section label to reach its radio list.
            preprocessor_section = page.get_by_text("Select preprocessor").locator("..").locator("..")
            preprocessor_section.locator("label").nth(1).click()
        else:
            page.get_by_text(preprocessor, exact=True).click()
        if click_next:
            page.get_by_role("button", name="Next").click()

    @staticmethod
    def select_language(page: Page, language: str = None):
        """Pick an OCR language; no-op (no Next click) when ``language`` is falsy."""
        if language:
            logger.info(f"Select language: {language}")
            page.get_by_role("textbox", name="Please select").click()
            page.get_by_text(language, exact=True).click()
            page.get_by_role("button", name="Next").click()

    @staticmethod
    def fill_job_and_start(page: Page, jobs_client):
        """Fill a unique job name, pick the "print" pipeline, start extraction,
        then poll the backend until the job reports Finished."""
        job_name = f"test_job_{uuid.uuid4().hex[:8]}"
        logger.info(f"Fill job name: {job_name}")
        page.get_by_role("textbox", name="Job name").fill(job_name)

        logger.info("Select pipeline dropdown")
        page.locator(".uui-icon.uui-enabled.uui-icon-dropdown").click()
        page.get_by_text("print", exact=True).click()

        logger.info("Start extraction")
        page.get_by_role("button", name="Start Extraction").click()
        page.wait_for_url("**/jobs/**", timeout=20000)
        jobs = jobs_client.search_jobs()
        job_id = next((j["id"] for j in jobs["data"] if j["name"] == job_name), None)
        assert job_id, f"Job with name {job_name} not found!"
        jobs_client.poll_until_finished(job_id, timeout_seconds=180)
        page.reload()
        expect(page.get_by_text("Finished")).to_be_visible(timeout=10000)

    def run_upload_workflow(
        self,
        page: Page,
        frontend_file_helper: FrontendFileHelper,
        num_files: int,
        file_tracker,
        client,
        jobs_client,
        dataset_type: str = "none",  # "none", "existing", "new"
        dataset_name: str = None,
        tmp_path=None,
        language: str = None,
        preprocessor: str = None,
    ):
        """Compose the per-step helpers into the full wizard flow."""
        logger.info("Open wizard")
        page.get_by_role("button", name="Upload Wizard").click()

        logger.info(f"Prepare {num_files} temp files")
        temp_files = frontend_file_helper.prepare_temp_files(tmp_path, num_files=num_files)
        frontend_file_helper.upload_files(page, temp_files, file_tracker=file_tracker, client=client)

        self.select_dataset(page, dataset_type, dataset_name)
        # When a language is requested, Next is clicked after the language pick
        # instead of right after the preprocessor pick (same wizard screen).
        self.select_preprocessor(page, preprocessor=preprocessor, click_next=not language)
        self.select_language(page, language)

        return self.fill_job_and_start(page, jobs_client)

    @pytest.mark.parametrize("num_files", [1, 3])
    def test_upload_documents_without_dataset(
        self, logged_in_page: Page, file_tracker, tmp_path, jobs_client, file_client, num_files
    ):
        page = logged_in_page
        created_files, client = file_tracker
        frontend_file_helper = FrontendFileHelper()
        self.run_upload_workflow(
            page,
            frontend_file_helper,
            num_files,
            file_tracker,
            client,
            jobs_client,
            dataset_type="none",
            tmp_path=tmp_path,
        )

    @pytest.mark.parametrize("num_files", [1, 3])
    def test_upload_documents_existing_dataset(
        self, logged_in_page: Page, file_tracker, tmp_path, jobs_client, file_client, dataset_tracker, num_files
    ):
        page = logged_in_page
        created_files, client = file_tracker
        created_datasets, dataset_client = dataset_tracker

        dataset_name = f"autotest_{uuid.uuid4().hex[:8]}"
        first_resp = dataset_client.create_dataset(name=dataset_name)
        created_datasets.append(dataset_name)
        assert "successfully created" in first_resp["detail"].lower()

        frontend_file_helper = FrontendFileHelper()
        self.run_upload_workflow(
            page,
            frontend_file_helper,
            num_files,
            file_tracker,
            client,
            jobs_client,
            dataset_type="existing",
            dataset_name=dataset_name,
            tmp_path=tmp_path,
        )

    @pytest.mark.parametrize("num_files", [1, 3])
    def test_upload_documents_new_dataset(
        self, logged_in_page: Page, file_tracker, tmp_path, jobs_client, file_client, dataset_tracker, num_files
    ):
        page = logged_in_page
        created_files, client = file_tracker
        created_datasets, dataset_client = dataset_tracker

        dataset_name = f"autotest_{uuid.uuid4().hex[:8]}"
        created_datasets.append(dataset_name)

        frontend_file_helper = FrontendFileHelper()
        self.run_upload_workflow(
            page,
            frontend_file_helper,
            num_files,
            file_tracker,
            client,
            jobs_client,
            dataset_type="new",
            dataset_name=dataset_name,
            tmp_path=tmp_path,
        )

    @pytest.mark.parametrize("num_files", [1, 3])
    def test_upload_documents_with_language(
        self,
        logged_in_page: Page,
        file_tracker,
        tmp_path,
        jobs_client,
        file_client,
        num_files,
    ):
        page = logged_in_page
        created_files, client = file_tracker
        frontend_file_helper = FrontendFileHelper()

        self.run_upload_workflow(
            page,
            frontend_file_helper,
            num_files,
            file_tracker,
            client,
            jobs_client,
            dataset_type="none",
            tmp_path=tmp_path,
            language="English",
        )

    @pytest.mark.parametrize("num_files", [1, 3])
    def test_upload_documents_any_preprocessor(
        self,
        logged_in_page: Page,
        file_tracker,
        tmp_path,
        jobs_client,
        file_client,
        num_files,
    ):
        page = logged_in_page
        created_files, client = file_tracker
        frontend_file_helper = FrontendFileHelper()

        self.run_upload_workflow(
            page,
            frontend_file_helper,
            num_files,
            file_tracker,
            client,
            jobs_client,
            dataset_type="none",
            tmp_path=tmp_path,
            preprocessor="any",
        )

    @pytest.mark.parametrize("num_files", [1, 3])
    def test_upload_invalid_format(self, tmp_path, logged_in_page, num_files):
        """Uploading non-document files must surface an error in the wizard."""
        page = logged_in_page
        temp_files = []
        for i in range(num_files):
            invalid_file = Path(tmp_path / f"{uuid.uuid4().hex}.py")
            invalid_file.write_text("this is py file")
            temp_files.append(invalid_file)

        logger.info("Open wizard")
        page.get_by_role("button", name="Upload Wizard").click()
        page.locator("input[type='file']").set_input_files([str(f) for f in temp_files])
        page.get_by_role("button", name="Next").click()
        try:
            expect(page.locator("text=Error occurred")).to_be_visible(timeout=2000)
            logger.info("Error message appeared as expected")
        except AssertionError:
            # BUG FIX: Playwright's sync `expect(...)` raises AssertionError on
            # timeout, not TimeoutError, so the original `except TimeoutError`
            # branch was unreachable and `pytest.fail` never ran.
            pytest.fail("Expected error message did not appear")
logger.info(f"Fill job name: {job_name}") page.get_by_role("textbox", name="Job name").fill(job_name) @@ -81,6 +81,7 @@ def run_upload_workflow( tmp_path=None, language: str = None, preprocessor: str = None, + job_name: str = None, ): logger.info("Open wizard") page.get_by_role("button", name="Upload Wizard").click() @@ -95,7 +96,7 @@ def run_upload_workflow( self.select_language(page, language) - return self.fill_job_and_start(page, jobs_client) + return self.fill_job_and_start(page, jobs_client, job_name=job_name) @pytest.mark.parametrize("num_files", [1, 3]) def test_upload_documents_without_dataset( @@ -141,6 +142,33 @@ def test_upload_documents_existing_dataset( tmp_path=tmp_path, ) + @pytest.mark.parametrize("num_files", [1, 3]) + def test_upload_documents_existing_dataset_new_name( + self, logged_in_page: Page, file_tracker, tmp_path, jobs_client, file_client, dataset_tracker, num_files + ): + # should we see an error here? + page = logged_in_page + created_files, client = file_tracker + created_datasets, dataset_client = dataset_tracker + + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + first_resp = dataset_client.create_dataset(name=dataset_name) + created_datasets.append(dataset_name) + assert "successfully created" in first_resp["detail"].lower() + + frontend_file_helper = FrontendFileHelper() + self.run_upload_workflow( + page, + frontend_file_helper, + num_files, + file_tracker, + client, + jobs_client, + dataset_type="news", + dataset_name=dataset_name, + tmp_path=tmp_path, + ) + @pytest.mark.parametrize("num_files", [1, 3]) def test_upload_documents_new_dataset( self, logged_in_page: Page, file_tracker, tmp_path, jobs_client, file_client, dataset_tracker, num_files @@ -217,6 +245,84 @@ def test_upload_documents_any_preprocessor( preprocessor="any", ) + @pytest.mark.parametrize("num_files", [1, 3]) + def test_upload_documents_all_settings_new_job_name( + self, + logged_in_page: Page, + file_tracker, + tmp_path, + jobs_client, + file_client, + 
num_files, + ): + page = logged_in_page + created_files, client = file_tracker + frontend_file_helper = FrontendFileHelper() + + self.run_upload_workflow( + page, + frontend_file_helper, + num_files, + file_tracker, + client, + jobs_client, + dataset_type="none", + tmp_path=tmp_path, + preprocessor="any", + language="English", + ) + + @pytest.mark.parametrize("num_files", [1, 3]) + def test_upload_documents_all_settings_existing_job_name( + self, + logged_in_page: Page, + file_tracker, + tmp_path, + jobs_client, + file_client, + num_files, + dataset_client, + dataset_tracker, + user_uuid, + job_tracker, + ): + # should we get an error here as well? + # create a job + created_files, client = file_tracker + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + created_datasets, dataset_client = dataset_tracker + dataset_name = f"autotest_ds_{uuid.uuid4().hex[:8]}" + dataset_client.create_dataset(name=dataset_name) + created_datasets.append(dataset_name) + move_resp = file_client.move_files(name=dataset_name, objects=[file_info["id"]])[0] + assert move_resp["status"] is True + job_name = f"test_job_{uuid.uuid4().hex[:8]}" + create_resp = jobs_client.create_job( + name=job_name, + file_ids=[file_info["id"]], + owners=[user_uuid], + ) + job_tracker[0].append(create_resp) + + # run wizard + page = logged_in_page + created_files, client = file_tracker + frontend_file_helper = FrontendFileHelper() + + self.run_upload_workflow( + page, + frontend_file_helper, + num_files, + file_tracker, + client, + jobs_client, + dataset_type="none", + tmp_path=tmp_path, + preprocessor="any", + language="English", + job_name=job_name, + ) + @pytest.mark.parametrize("num_files", [1, 3]) def test_upload_invalid_format(self, tmp_path, logged_in_page, num_files): page = logged_in_page From a8cdf7a2776d36a4f07bfa36b9b680d27523ed2b Mon Sep 17 00:00:00 2001 From: asobolev Date: Tue, 16 Sep 2025 15:45:02 +0200 Subject: [PATCH 28/37] added controls tests --- 
class TestControls:
    """UI tests for list controls: scrolling, pagination, page-size selector."""

    def test_scroll_documents(self, logged_in_page: Page):
        """Scrolling to the last and back to the first card keeps both visible."""
        page = logged_in_page

        last_doc = page.locator('a[class*="document-card-view-item_card-item"]').last
        last_doc.scroll_into_view_if_needed()
        expect(last_doc).to_be_visible()

        first_doc = page.locator('a[class*="document-card-view-item_card-item"]').first
        first_doc.scroll_into_view_if_needed()
        expect(first_doc).to_be_visible()

    def test_pagination_by_page_number(self, logged_in_page: Page):
        """Clicking page "2" either marks it current or changes the first card."""
        page = logged_in_page

        nav = page.locator('nav[role="navigation"]')
        nav.wait_for(state="visible", timeout=10000)
        list_selector = 'a[class*="document-card-view-item_card-item"]'
        first_doc = page.locator(list_selector).first
        expect(first_doc).to_be_visible(timeout=10000)

        old_text = first_doc.text_content()

        nav.get_by_role("button", name="2", exact=True).click()

        # Some UUI builds don't set aria-current; fall back to content change.
        try:
            expect(nav.get_by_role("button", name="2")).to_have_attribute("aria-current", "true", timeout=10000)
        except AssertionError:
            expect(page.locator(list_selector).first).not_to_have_text(old_text, timeout=10000)

        active_attr = nav.get_by_role("button", name="2").get_attribute("aria-current")
        assert active_attr == "true" or page.locator(list_selector).first.text_content() != old_text

    def test_pagination_by_arrows(self, logged_in_page: Page):
        """Forward then back arrows land on pages 2 and 1 (or change content)."""
        page = logged_in_page

        nav = page.locator('nav[role="navigation"]')
        nav.wait_for(state="visible", timeout=10000)
        list_selector = 'a[class*="document-card-view-item_card-item"]'
        first_doc = page.locator(list_selector).first
        expect(first_doc).to_be_visible(timeout=10000)

        old_text = first_doc.text_content()

        # Last button in the nav is the "next page" arrow.
        nav.locator("button").last.click()
        try:
            expect(nav.get_by_role("button", name="2", exact=True)).to_have_attribute(
                "aria-current", "true", timeout=10000
            )
        except AssertionError:
            expect(page.locator(list_selector).first).not_to_have_text(old_text, timeout=10000)

        old_text_back = page.locator(list_selector).first.text_content()
        # First button in the nav is the "previous page" arrow.
        nav.locator("button").first.click()
        try:
            expect(nav.get_by_role("button", name="1", exact=True)).to_have_attribute(
                "aria-current", "true", timeout=10000
            )
        except AssertionError:
            expect(page.locator(list_selector).first).not_to_have_text(old_text_back, timeout=10000)

        active_attr_1 = nav.get_by_role("button", name="1", exact=True).get_attribute("aria-current")
        assert active_attr_1 == "true" or page.locator(list_selector).first.text_content() != old_text_back

    def test_show_on_page(self, logged_in_page: Page):
        """Every "Show on page" option caps the number of rendered cards."""
        page = logged_in_page

        list_selector = 'a[class*="document-card-view-item_card-item"]'
        cards = page.locator(list_selector)

        page_size_container = page.locator("div:has(> div > span:has-text('Show on page'))")
        page_size_input = page_size_container.locator("input[aria-haspopup='true']")

        # Snapshot the option list first, then close the dropdown again.
        page_size_input.click()
        options = page.locator("div[role='option']")
        option_texts = [options.nth(i).inner_text() for i in range(options.count())]
        page_size_input.click()

        for value in option_texts:
            page_size_input.click()

            option = page.locator("div[role='option']", has_text=value).first
            option.wait_for(state="visible", timeout=5000)
            option.click()

            expect(cards.first).to_be_visible(timeout=10000)
            count = cards.count()
            # FIX: dropped stray debug print; the assertion message carries
            # the same information.
            assert count <= int(value), f"Expected at most {value} cards, got {count}"
page.locator(".uui-input-box.-clickable.uui-focus").click() + + page.get_by_role("textbox", name="Select categories").click() + for category in categories: + logger.info(f"Select category: {category}") + page.get_by_text(category, exact=True).click() + + logger.info("Distribute annotation tasks") + page.get_by_text("Distribute annotation tasks").click() + + self.fill_job_and_start(page, jobs_client, job_name) + def run_upload_workflow( self, page: Page, @@ -82,6 +121,7 @@ def run_upload_workflow( language: str = None, preprocessor: str = None, job_name: str = None, + human_in_loop: bool = False, ): logger.info("Open wizard") page.get_by_role("button", name="Upload Wizard").click() @@ -96,7 +136,10 @@ def run_upload_workflow( self.select_language(page, language) - return self.fill_job_and_start(page, jobs_client, job_name=job_name) + if human_in_loop: + self.select_human_in_the_loop_and_start(page, jobs_client, job_name) + else: + self.fill_job_and_start(page, jobs_client, job_name=job_name) @pytest.mark.parametrize("num_files", [1, 3]) def test_upload_documents_without_dataset( @@ -341,3 +384,21 @@ def test_upload_invalid_format(self, tmp_path, logged_in_page, num_files): logger.info("Error message appeared as expected") except TimeoutError: pytest.fail("Expected error message did not appear") + + @pytest.mark.skip(reason="Returns 500 even in browser") + @pytest.mark.parametrize("num_files", [1, 3]) + def test_human_in_the_loop(self, logged_in_page: Page, num_files, file_tracker, jobs_client, tmp_path): + page = logged_in_page + created_files, client = file_tracker + frontend_file_helper = FrontendFileHelper() + self.run_upload_workflow( + page, + frontend_file_helper, + num_files, + file_tracker, + client, + jobs_client, + dataset_type="none", + tmp_path=tmp_path, + human_in_loop=True, + ) From 0acc56290470a1e6b9b419d026db87df52157072 Mon Sep 17 00:00:00 2001 From: asobolev Date: Thu, 18 Sep 2025 17:59:41 +0200 Subject: [PATCH 29/37] added more frontend tests --- 
.../tests/test_frontend.py | 56 ++++++++++++++++++- 1 file changed, 55 insertions(+), 1 deletion(-) diff --git a/test_automation_framework/tests/test_frontend.py b/test_automation_framework/tests/test_frontend.py index d57ddad1f..d9b15e5f1 100644 --- a/test_automation_framework/tests/test_frontend.py +++ b/test_automation_framework/tests/test_frontend.py @@ -211,5 +211,59 @@ def test_show_on_page(self, logged_in_page: Page): expect(cards.first).to_be_visible(timeout=10000) count = cards.count() - print(f"Expected at most {value} cards, got {count}") assert count <= int(value), f"Expected at most {value} cards, got {count}" + + @pytest.mark.parametrize( + "flow", + [ + {"after_cancel": "Discard"}, + {"after_cancel": "Save"}, + {"choose_first": True, "after_cancel": None}, + ], + ) + def test_add_to_dataset_cancel(self, logged_in_page: Page, flow): + page = logged_in_page + first_card = page.locator("a[class*='document-card-view-item_card-item']").first + expect(first_card).to_be_visible(timeout=10000) + first_card.scroll_into_view_if_needed() + input_el = first_card.locator("input[type='checkbox']").first + label = first_card.locator("label.uui-checkbox-container") + uui_div = first_card.locator("div.uui-checkbox") + + click_target = label.first if label.count() else uui_div.first if uui_div.count() else input_el + click_target.click(force=True) + expect(input_el).to_be_checked(timeout=5000) + + page.get_by_role("button", name="Add to dataset").click() + dataset_input = page.get_by_role("textbox", name="Please select dataset") + dataset_input.click() + first_option = page.locator("div[role='option']").first + first_option.wait_for(state="visible", timeout=5000) + first_option.click() + + if flow.get("choose_first"): + page.get_by_role("button", name="Choose").click() + page.get_by_role("button", name="Cancel").click() + + if flow.get("after_cancel"): + page.get_by_role("button", name=flow["after_cancel"]).click() + + expect(page.get_by_role("button", 
name="Choose")).not_to_be_visible(timeout=5000) + + def test_click_preprocess(self, logged_in_page: Page): + page = logged_in_page + first_card = page.locator("a[class*='document-card-view-item_card-item']").first + expect(first_card).to_be_visible(timeout=10000) + first_card.scroll_into_view_if_needed() + input_el = first_card.locator("input[type='checkbox']").first + label = first_card.locator("label.uui-checkbox-container") + uui_div = first_card.locator("div.uui-checkbox") + + click_target = label.first if label.count() else uui_div.first if uui_div.count() else input_el + click_target.click(force=True) + expect(input_el).to_be_checked(timeout=5000) + + preprocess_btn = page.get_by_role("button", name="Preprocess") + expect(preprocess_btn).to_be_visible(timeout=5000) + preprocess_btn.click() + # what are we checking? From f60353f285edcbdff38312b10eb798af3d8be238 Mon Sep 17 00:00:00 2001 From: asobolev Date: Fri, 19 Sep 2025 17:49:21 +0200 Subject: [PATCH 30/37] added ui deletion and adding to extraction tests --- .../tests/test_frontend.py | 106 ++++++++++++++++++ 1 file changed, 106 insertions(+) diff --git a/test_automation_framework/tests/test_frontend.py b/test_automation_framework/tests/test_frontend.py index d9b15e5f1..79f74c9b3 100644 --- a/test_automation_framework/tests/test_frontend.py +++ b/test_automation_framework/tests/test_frontend.py @@ -1,5 +1,6 @@ import pytest from playwright.sync_api import Page, expect +import uuid class Locators: @@ -267,3 +268,108 @@ def test_click_preprocess(self, logged_in_page: Page): expect(preprocess_btn).to_be_visible(timeout=5000) preprocess_btn.click() # what are we checking? 
+ + def test_add_to_extraction( + self, + logged_in_page: Page, + jobs_client, + ): + page = logged_in_page + + first_card = page.locator("a[class*='document-card-view-item_card-item']").first + expect(first_card).to_be_visible(timeout=10000) + first_card.scroll_into_view_if_needed() + + checkbox = first_card.locator("input[type='checkbox']").first + label = first_card.locator("label.uui-checkbox-container") + uui_div = first_card.locator("div.uui-checkbox") + + click_target = label.first if label.count() else uui_div.first if uui_div.count() else checkbox + click_target.click(force=True) + expect(checkbox).to_be_checked(timeout=5000) + + page.get_by_role("button", name="Add to extraction").click() + + first_cell = page.get_by_role("cell").first + first_cell.locator("label div").click() + expect(first_cell.locator("input[type='checkbox']")).to_be_checked(timeout=5000) + + page.get_by_role("button", name="Next").click() + + job_name = f"extraction_job_{uuid.uuid4().hex[:8]}" + page.get_by_role("textbox", name="Job name").fill(job_name) + page.get_by_role("textbox", name="Select pipeline").click() + page.get_by_text("print", exact=True).click() + + page.get_by_role("button", name="Start Extraction").click() + + page.wait_for_url("**/jobs/**", timeout=20000) + jobs = jobs_client.search_jobs() + job_id = next((j["id"] for j in jobs["data"] if j["name"] == job_name), None) + assert job_id, f"Job with name {job_name} not found!" 
+ jobs_client.poll_until_finished(job_id, timeout_seconds=180) + page.reload() + expect(page.get_by_text("Finished")).to_be_visible(timeout=10000) + + @pytest.mark.parametrize("num_files", [1, 3]) + @pytest.mark.parametrize("view_mode", ["card", "list"]) + def test_delete_files(self, logged_in_page: Page, file_tracker, tmp_path, num_files, view_mode): + # list view fails because deleted lines does not disappear + page = logged_in_page + created_files, client = file_tracker + temp_files = [] + uploaded_files = [] + + for _ in range(num_files): + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + assert file_info["status"] is True + uploaded_files.append(file_info) + created_files.append(file_info) + temp_files.append(temp_file) + + page.reload() + + if view_mode == "list": + page.locator("rect").nth(1).click(force=True) + rows_selector = "div.uui-table-row-container[role='row']" + items = page.locator(rows_selector) + else: + cards_selector = 'a[class*="document-card-view-item_card-item"]' + items = page.locator(cards_selector) + + expect(items.first).to_be_visible(timeout=10000) + + selected_names = [] + for f in uploaded_files: + name = f["file_name"] + element = items.filter(has_text=name).first + expect(element).to_be_visible(timeout=10000) + + checkbox = element.locator("input[type='checkbox']").first + label = element.locator("label.uui-checkbox-container") + uui_div = element.locator("div.uui-checkbox") + click_target = label.first if label.count() else uui_div.first if uui_div.count() else checkbox + + element.scroll_into_view_if_needed() + click_target.click(force=True) + expect(checkbox).to_be_checked(timeout=5000) + + selected_names.append(name) + + delete_button = page.get_by_role("button", name="Delete") + delete_button.click(force=True) + + for name in selected_names: + expect(page.get_by_text(name)).not_to_be_visible(timeout=10000) + + remaining = client.search_files()["data"] + remaining_ids = {f["id"] for f in 
remaining} + for f in uploaded_files: + assert f["id"] not in remaining_ids, f"File {f['file_name']} was not deleted" + for f in created_files: + if f not in uploaded_files: + assert f["id"] in remaining_ids, f"Unrelated file {f['file_name']} was deleted" + + for temp in temp_files: + if temp.exists(): + temp.unlink() From a4052f736505de3f8ac7e7c4c2e03bd031255707 Mon Sep 17 00:00:00 2001 From: asobolev Date: Tue, 23 Sep 2025 18:01:31 +0200 Subject: [PATCH 31/37] added dataset adding ui tests --- .../tests/test_frontend.py | 107 +++++++++++++++++- 1 file changed, 106 insertions(+), 1 deletion(-) diff --git a/test_automation_framework/tests/test_frontend.py b/test_automation_framework/tests/test_frontend.py index 79f74c9b3..88a6fc9df 100644 --- a/test_automation_framework/tests/test_frontend.py +++ b/test_automation_framework/tests/test_frontend.py @@ -43,7 +43,7 @@ def test_select_unselect_one_by_one_icon_view(self, logged_in_page: Page, action page.locator("rect").nth(0).click(force=True) items = page.locator("a[class^='document-card-view-item_card-item']") - expect(items.first).to_be_visible(timeout=5000) + expect(items.first).to_be_visible(timeout=10000) inputs = items.locator("input[type='checkbox']") @@ -373,3 +373,108 @@ def test_delete_files(self, logged_in_page: Page, file_tracker, tmp_path, num_fi for temp in temp_files: if temp.exists(): temp.unlink() + + def test_delete_dataset(self, logged_in_page: Page, dataset_tracker): + page = logged_in_page + + created, client = dataset_tracker + + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + create_resp = client.create_dataset(name=dataset_name) + assert "detail" in create_resp + assert "successfully created" in create_resp["detail"].lower() + search_resp = client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}]) + assert any(d["name"] == dataset_name for d in search_resp["data"]) + + created.append(dataset_name) + + row = 
page.locator(f"div[role='none']:has-text('{dataset_name}')") + expect(row).to_be_visible(timeout=10000) + + delete_button = row.locator("button", has=page.locator("svg")).last + delete_button.click(force=True) + + expect(row).not_to_be_visible(timeout=10000) + + search_after = client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}]) + assert all(d["name"] != dataset_name for d in search_after["data"]) + + @pytest.mark.parametrize( + "flow", + [ + {"after_cancel": "Discard"}, + {"after_cancel": "Save"}, + {"save": True, "after_cancel": None}, + ], + ) + def test_create_dataset_cancel(self, logged_in_page: Page, flow, dataset_tracker): + page = logged_in_page + created, client = dataset_tracker + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + items = page.locator("a[class^='document-card-view-item_card-item']") + expect(items.first).to_be_visible(timeout=10000) + + page.get_by_role("button", name="Add new dataset").click() + dataset_modal = page.locator("div[role='modal']", has_text="Add dataset") + dataset_modal.wait_for(state="visible", timeout=5000) + page.get_by_role("textbox", name="Name").fill(dataset_name) + + if flow.get("save"): + dataset_modal.get_by_role("button", name="Save").click(force=True) + created.append(dataset_name) + dataset_modal.wait_for(state="detached", timeout=5000) + return + + dataset_modal.get_by_role("button", name="Cancel").click() + + if flow.get("after_cancel"): + page.wait_for_selector( + "div[role='modal']:has-text('Your data may be lost. Do you want to save data?')", + state="visible", + timeout=5000, + ) + + confirm_modal = page.locator("div[role='modal']").filter( + has_text="Your data may be lost. Do you want to save data?" 
+ ) + if flow["after_cancel"] == "Save": + created.append(dataset_name) + + confirm_modal.get_by_role("button", name=flow["after_cancel"]).click(force=True) + + confirm_modal.wait_for(state="detached", timeout=5000) + dataset_modal.wait_for(state="detached", timeout=5000) + + expect(page.get_by_role("textbox", name="Name")).not_to_be_visible(timeout=5000) + + def test_create_dataset_no_name(self, logged_in_page: Page): + page = logged_in_page + items = page.locator("a[class^='document-card-view-item_card-item']") + expect(items.first).to_be_visible(timeout=10000) + + page.get_by_role("button", name="Add new dataset").click() + dataset_modal = page.locator("div[role='modal']", has_text="Add dataset") + dataset_modal.wait_for(state="visible", timeout=5000) + + dataset_modal.get_by_role("button", name="Save").click(force=True) + error_message = dataset_modal.locator("div[role='alert'].uui-invalid-message") + expect(error_message).to_have_text("The field is mandatory", timeout=5000) + + def test_create_existing_dataset(self, logged_in_page: Page, dataset_tracker): + page = logged_in_page + created_datasets, client = dataset_tracker + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + resp = client.create_dataset(name=dataset_name) + created_datasets.append(dataset_name) + assert "successfully created" in resp["detail"].lower() + + document_items = page.locator("a[class^='document-card-view-item_card-item']") + expect(document_items.first).to_be_visible(timeout=10000) + + page.get_by_role("button", name="Add new dataset").click() + dataset_modal = page.locator("div[role='modal']", has_text="Add dataset") + dataset_modal.wait_for(state="visible", timeout=5000) + page.get_by_role("textbox", name="Name").fill(dataset_name) + dataset_modal.get_by_role("button", name="Save").click(force=True) + + expect(page.locator(f"text=Dataset {dataset_name} already exists!")).to_be_visible(timeout=30000) From dba4f95d1217228b2f4b16757d45302addd8972e Mon Sep 17 00:00:00 2001 From: 
asobolev Date: Fri, 26 Sep 2025 09:33:18 +0200 Subject: [PATCH 32/37] added remaining plugins tests --- .../config/defaults.yaml | 3 +- test_automation_framework/conftest.py | 35 +++-- .../helpers/plugins/plugins_client.py | 36 +++-- test_automation_framework/settings.py | 1 + .../tests/test_plugins.py | 139 ++++++++++++++++-- 5 files changed, 183 insertions(+), 31 deletions(-) diff --git a/test_automation_framework/config/defaults.yaml b/test_automation_framework/config/defaults.yaml index 10cd5f9f2..17d33a05a 100644 --- a/test_automation_framework/config/defaults.yaml +++ b/test_automation_framework/config/defaults.yaml @@ -1,4 +1,5 @@ -BASE_URL: "http://demo.badgerdoc.com:8080" +BASE_URL: "http://demo.badgerdoc.com" +BASE_PORT: 8080 TIMEOUT_SECONDS: 30 MAX_WORKERS: 4 USE_MOCK_LLM: true diff --git a/test_automation_framework/conftest.py b/test_automation_framework/conftest.py index 54fee5d88..fa25adcee 100644 --- a/test_automation_framework/conftest.py +++ b/test_automation_framework/conftest.py @@ -1,6 +1,8 @@ import logging from logging import getLogger from typing import Tuple +from playwright.sync_api import expect + import pytest @@ -37,7 +39,7 @@ def tenant(settings) -> str: @pytest.fixture(scope="session") def base_client(settings) -> BaseClient: - client = BaseClient(settings.BASE_URL, timeout=10) + client = BaseClient(f"{settings.BASE_URL}:{settings.BASE_PORT}", timeout=10) yield client client.close() @@ -59,44 +61,44 @@ def access_token(auth_token) -> str: @pytest.fixture def menu_client(settings, access_token, tenant) -> MenuClient: - return MenuClient(settings.BASE_URL, access_token, tenant) + return MenuClient(f"{settings.BASE_URL}:{settings.BASE_PORT}", access_token, tenant) @pytest.fixture def dataset_client(settings, access_token, tenant) -> DatasetClient: - return DatasetClient(settings.BASE_URL, access_token, tenant) + return DatasetClient(f"{settings.BASE_URL}:{settings.BASE_PORT}", access_token, tenant) @pytest.fixture def 
file_client(settings, access_token, tenant) -> FileClient: - return FileClient(settings.BASE_URL, access_token, tenant) + return FileClient(f"{settings.BASE_URL}:{settings.BASE_PORT}", access_token, tenant) @pytest.fixture def jobs_client(settings, access_token, tenant) -> JobsClient: - return JobsClient(settings.BASE_URL, access_token, tenant) + return JobsClient(f"{settings.BASE_URL}:{settings.BASE_PORT}", access_token, tenant) @pytest.fixture def reports_client(settings, access_token, tenant) -> ReportsClient: - return ReportsClient(settings.BASE_URL, access_token, tenant) + return ReportsClient(f"{settings.BASE_URL}:{settings.BASE_PORT}", access_token, tenant) @pytest.fixture def plugins_client(settings, access_token, tenant) -> PluginsClient: - return PluginsClient(settings.BASE_URL, access_token, tenant) + return PluginsClient(f"{settings.BASE_URL}:{settings.BASE_PORT}", access_token, tenant) @pytest.fixture def user_uuid(settings, access_token, tenant) -> str: - users_client = UsersClient(settings.BASE_URL, access_token, tenant) + users_client = UsersClient(f"{settings.BASE_URL}:{settings.BASE_PORT}", access_token, tenant) users = users_client.search_users() return next((u.id for u in users if u.username == "admin"), None) @pytest.fixture def categories_client(settings, access_token, tenant) -> CategoriesClient: - return CategoriesClient(settings.BASE_URL, access_token, tenant) + return CategoriesClient(f"{settings.BASE_URL}:{settings.BASE_PORT}", access_token, tenant) @pytest.fixture @@ -153,9 +155,20 @@ def plugins_tracker(plugins_client): @pytest.fixture -def logged_in_page(page: Page) -> Page: - page.goto("http://demo.badgerdoc.com:8083/login") +def logged_in_page(page: Page, settings) -> Page: + page.goto(f"{settings.BASE_URL}:8083/login", timeout=180000) page.get_by_role("textbox", name="Username").fill("admin") page.get_by_role("textbox", name="Password").fill("admin") page.get_by_role("button", name="Login", exact=True).click() return page + + 
+@pytest.fixture +def plugins_page(logged_in_page, settings) -> Page: + page = logged_in_page + items = page.locator("a[class^='document-card-view-item_card-item']") + expect(items.first).to_be_visible(timeout=100000) + page.goto(f"{settings.BASE_URL}:8083/settings/plugins") + row_cells = page.locator("div[role='row'] div[role='cell']:first-child div div") + expect(row_cells.first).to_be_visible(timeout=100000) + return page diff --git a/test_automation_framework/helpers/plugins/plugins_client.py b/test_automation_framework/helpers/plugins/plugins_client.py index 9f4d31f04..b6cf929df 100644 --- a/test_automation_framework/helpers/plugins/plugins_client.py +++ b/test_automation_framework/helpers/plugins/plugins_client.py @@ -1,6 +1,20 @@ -from typing import Any, Dict, List +from typing import List import logging from helpers.base_client.base_client import BaseClient +from dataclasses import dataclass + + +@dataclass +class Plugin: + id: int + name: str + description: str + version: str + menu_name: str + url: str + tenant: str + is_autoinstalled: bool + is_iframe: bool logger = logging.getLogger(__name__) @@ -10,8 +24,9 @@ class PluginsClient(BaseClient): def __init__(self, base_url: str, token: str, tenant: str) -> None: super().__init__(base_url, token=token, tenant=tenant) - def get_plugins(self) -> List[Dict[str, Any]]: - return self.get_json("/core/plugins", headers=self._default_headers()) + def get_plugins(self) -> List[Plugin]: + raw = self.get_json("/core/plugins", headers=self._default_headers()) + return [Plugin(**item) for item in raw] def create_plugin( self, @@ -21,7 +36,7 @@ def create_plugin( version: str = "1", description: str = "", is_iframe: bool = True, - ) -> dict: + ) -> Plugin: payload = { "name": name, "menu_name": menu_name, @@ -48,26 +63,29 @@ def create_plugin( ) try: - return self.post_json( + resp = self.post_json( "/core/plugins", json=payload, headers=headers, ) + return Plugin(**resp) except Exception as e: logger.error(f"Failed to 
create plugin: {e}") if hasattr(e, "body"): logger.error(f"Response body: {e.body}") raise - def update_plugin(self, plugin_id: int, **fields) -> dict: - return self.put_json( + def update_plugin(self, plugin_id: int, **fields) -> Plugin: + resp = self.put_json( f"/core/plugins/{plugin_id}", json=fields, headers=self._default_headers(content_type_json=True), ) + return Plugin(**resp) - def delete_plugin(self, plugin_id: int) -> dict: - return self.delete_json( + def delete_plugin(self, plugin_id: int) -> Plugin: + resp = self.delete_json( f"/core/plugins/{plugin_id}", headers=self._default_headers(content_type_json=True), ) + return Plugin(**resp) diff --git a/test_automation_framework/settings.py b/test_automation_framework/settings.py index 13076ce2f..5e1174c1f 100644 --- a/test_automation_framework/settings.py +++ b/test_automation_framework/settings.py @@ -9,6 +9,7 @@ class Settings(BaseSettings): BASE_URL: str + BASE_PORT: int API_USER: str API_PASS: SecretStr TIMEOUT_SECONDS: int = 30 diff --git a/test_automation_framework/tests/test_plugins.py b/test_automation_framework/tests/test_plugins.py index cf31abf83..09cba3b3b 100644 --- a/test_automation_framework/tests/test_plugins.py +++ b/test_automation_framework/tests/test_plugins.py @@ -1,11 +1,15 @@ from logging import getLogger import uuid +from playwright.sync_api import expect +import pytest + logger = getLogger(__name__) class TestPlugins: - def test_create_and_delete_plugin(self, plugins_tracker): + @pytest.mark.parametrize("iframe", [True, False]) + def test_create_and_delete_plugin(self, plugins_tracker, iframe): created, plugins_client = plugins_tracker unique_name = f"plugin_{uuid.uuid4().hex[:8]}" resp = plugins_client.create_plugin( @@ -14,19 +18,19 @@ def test_create_and_delete_plugin(self, plugins_tracker): description="bar", version="1", url="http://what.com/what", - is_iframe=True, + is_iframe=iframe, ) - plugin_id = resp["id"] + plugin_id = resp.id created.append(plugin_id) plugins = 
plugins_client.get_plugins() - assert any(p["id"] == plugin_id for p in plugins) - assert any(p["name"] == unique_name for p in plugins) + assert any(p.id == plugin_id for p in plugins) + assert any(p.name == unique_name for p in plugins) plugins_client.delete_plugin(plugin_id) plugins = plugins_client.get_plugins() - assert not any(p["id"] == plugin_id for p in plugins) + assert not any(p.id == plugin_id for p in plugins) def test_update_plugin(self, plugins_tracker): created, plugins_client = plugins_tracker @@ -39,7 +43,7 @@ def test_update_plugin(self, plugins_tracker): url="http://what.com/what", is_iframe=True, ) - plugin_id = resp["id"] + plugin_id = resp.id created.append(plugin_id) updated_payload = { @@ -51,8 +55,123 @@ def test_update_plugin(self, plugins_tracker): "is_iframe": True, } update_resp = plugins_client.update_plugin(plugin_id, **updated_payload) - assert update_resp["description"] == "updated desc" + assert update_resp.description == "updated desc" + + plugins = plugins_client.get_plugins() + updated = next(p for p in plugins if p.id == plugin_id) + assert updated.description == "updated desc" + + def test_view_plugins_from_settings(self, plugins_page, plugins_tracker): + page = plugins_page + created, plugins_client = plugins_tracker + plugins = plugins_client.get_plugins() + row_cells = page.locator("div[role='row'] div[role='cell']:first-child div div") + frontend_names = [row_cells.nth(i).inner_text().strip() for i in range(row_cells.count())] + api_names = [p.menu_name.strip() for p in plugins] + assert set(frontend_names) == set( + api_names + ), f"Frontend plugins {frontend_names} do not match API plugins {api_names}" + + def test_sort_plugins_by_name(self, plugins_page, plugins_tracker): + page = plugins_page + row_cells = page.locator("div[role='row'] div[role='cell']:first-child div div") + + def get_frontend_names(): + return [row_cells.nth(i).inner_text().strip() for i in range(row_cells.count())] + + initial_names = 
get_frontend_names() + assert initial_names, "No plugins loaded in frontend table" + + name_header = page.locator("div[role='columnheader'] >> text=Name") + expect(name_header).to_be_visible() + name_header.click() + + asc_names = get_frontend_names() + assert asc_names == sorted( + asc_names, key=lambda x: x.lower() + ), f"Plugins not sorted ascending by name: {asc_names}" + + name_header.click() + desc_names = get_frontend_names() + assert desc_names == sorted( + desc_names, key=lambda x: x.lower(), reverse=True + ), f"Plugins not sorted descending by name: {desc_names}" + + @pytest.mark.parametrize("delete_action", ["confirm", "cancel"]) + @pytest.mark.parametrize("iframe", [False, True]) + def test_create_and_delete_plugin_via_ui(self, plugins_page, plugins_tracker, delete_action, iframe): + created, plugins_client = plugins_tracker + page = plugins_page + + plugin_name = f"plugin_{uuid.uuid4().hex[:6]}" + menu_name = f"menu_{uuid.uuid4().hex[:6]}" + description = "test plugin description" + version = "1.0" + url = "http://what.com/what" + + page.get_by_role("button", name="Add Plugin").click() + page.get_by_role("textbox").nth(0).fill(plugin_name) + page.get_by_role("textbox").nth(1).fill(menu_name) + page.get_by_role("textbox").nth(2).fill(description) + page.get_by_role("textbox").nth(3).fill(version) + page.get_by_role("textbox", name="http://example.com/plugin").fill(url) + + if not iframe: + page.locator("label", has_text="Is Iframe Plugin?").locator("div").nth(1).click() + + page.get_by_role("button", name="Save").click() + + row = page.get_by_role("row", name=menu_name) + expect(row).to_be_visible(timeout=30000) + + plugins = plugins_client.get_plugins() + plugin = next((p for p in plugins if p.name == plugin_name), None) + created.append(plugin.id) + assert plugin, f"Plugin {plugin_name} not found in API" + assert plugin.is_iframe == iframe + + row.get_by_role("button").click() + page.get_by_role("button", name=delete_action.capitalize()).click() + + 
if delete_action == "confirm": + expect(page.get_by_role("row", name=menu_name)).not_to_be_visible(timeout=30000) + plugins = plugins_client.get_plugins() + assert all(plugin.id != p.id for p in plugins) + else: + expect(row).to_be_visible(timeout=30000) + plugins = plugins_client.get_plugins() + assert any(p.id == plugin.id for p in plugins) + + @pytest.mark.parametrize("missing_field", ["plugin_name", "menu_name", "version", "url"]) + def test_validate_mandatory_fields(self, plugins_page, missing_field, plugins_tracker): + created, plugins_client = plugins_tracker + page = plugins_page + + page.get_by_role("button", name="Add Plugin").click() + + plugin_name = f"plugin_{uuid.uuid4().hex[:6]}" if missing_field != "plugin_name" else "" + menu_name = f"menu_{uuid.uuid4().hex[:6]}" + version = "1.0" + url = "http://what.com/what" + + if missing_field != "plugin_name": + page.get_by_role("textbox").nth(0).fill(plugin_name) + if missing_field != "menu_name": + page.get_by_role("textbox").nth(1).fill(menu_name) + if missing_field != "version": + page.get_by_role("textbox").nth(3).fill(version) + if missing_field != "url": + page.get_by_role("textbox", name="http://example.com/plugin").fill(url) + + page.get_by_role("button", name="Save").click() + + if missing_field == "url": + expect(page.get_by_text("Please enter a valid URL starting with http://")).to_be_visible() + return plugins = plugins_client.get_plugins() - updated = next(p for p in plugins if p["id"] == plugin_id) - assert updated["description"] == "updated desc" + plugin = next((p for p in plugins if p.name == plugin_name), None) + assert plugin, f"Plugin {plugin_name} was not created (unexpected)" + created.append(plugin.id) + + pytest.fail(f"Validation missing for {missing_field}, plugin {plugin.id} was created") From d473f981ec39618c88c827f5f16b67007facee2e Mon Sep 17 00:00:00 2001 From: asobolev Date: Tue, 30 Sep 2025 15:03:23 +0200 Subject: [PATCH 33/37] new jobs tests + some refactoring + adding steps 
--- test_automation_framework/conftest.py | 13 +- .../helpers/steps/jobs_creation.py | 184 +++++++ .../tests/test_datasets.py | 137 +++++ .../{test_frontend.py => test_documents.py} | 484 +++++++++++------- test_automation_framework/tests/test_files.py | 224 -------- test_automation_framework/tests/test_jobs.py | 116 ++++- .../tests/test_upload_wizard.py | 155 +----- 7 files changed, 744 insertions(+), 569 deletions(-) create mode 100644 test_automation_framework/helpers/steps/jobs_creation.py rename test_automation_framework/tests/{test_frontend.py => test_documents.py} (58%) delete mode 100644 test_automation_framework/tests/test_files.py diff --git a/test_automation_framework/conftest.py b/test_automation_framework/conftest.py index fa25adcee..f90f89d2b 100644 --- a/test_automation_framework/conftest.py +++ b/test_automation_framework/conftest.py @@ -160,15 +160,24 @@ def logged_in_page(page: Page, settings) -> Page: page.get_by_role("textbox", name="Username").fill("admin") page.get_by_role("textbox", name="Password").fill("admin") page.get_by_role("button", name="Login", exact=True).click() + items = page.locator("a[class^='document-card-view-item_card-item']") + expect(items.first).to_be_visible(timeout=100000) return page @pytest.fixture def plugins_page(logged_in_page, settings) -> Page: page = logged_in_page - items = page.locator("a[class^='document-card-view-item_card-item']") - expect(items.first).to_be_visible(timeout=100000) page.goto(f"{settings.BASE_URL}:8083/settings/plugins") row_cells = page.locator("div[role='row'] div[role='cell']:first-child div div") expect(row_cells.first).to_be_visible(timeout=100000) return page + + +@pytest.fixture +def jobs_page(logged_in_page, settings) -> Page: + page = logged_in_page + page.goto(f"{settings.BASE_URL}:8083/jobs") + rows = page.locator("div[role='row']").locator("xpath=..").locator("div[role='row']:not(.uui-table-header-row)") + expect(rows.first).to_be_visible(timeout=5000) + return page diff --git 
a/test_automation_framework/helpers/steps/jobs_creation.py b/test_automation_framework/helpers/steps/jobs_creation.py new file mode 100644 index 000000000..3ec601727 --- /dev/null +++ b/test_automation_framework/helpers/steps/jobs_creation.py @@ -0,0 +1,184 @@ +import uuid +from playwright.sync_api import Page, expect +from helpers.files.file_client_frontend import FrontendFileHelper +from logging import getLogger +import datetime + +logger = getLogger(__name__) + + +def select_dataset(page: Page, dataset_type: str, dataset_name: str = None): + logger.info(f"Select dataset option: {dataset_type}") + if dataset_type == "none": + page.locator("label:has-text('No') div").nth(1).click() + elif dataset_type == "existing": + page.locator("label:has-text('Existing dataset') div").nth(1).click() + page.locator(".uui-icon.uui-enabled.uui-icon-dropdown").click() + page.get_by_text(dataset_name, exact=True).click() + elif dataset_type == "new": + page.locator("label:has-text('New dataset') div").nth(1).click() + page.get_by_role("textbox", name="Dataset name").fill(dataset_name) + else: + raise ValueError(f"Unknown dataset_type: {dataset_type}") + page.get_by_role("button", name="Next").click() + + +def select_preprocessor(page: Page, preprocessor: str = None, click_next: bool = True) -> None: + logger.info(f"Select preprocessor: {preprocessor or 'No need'}") + if preprocessor is None: + page.get_by_text("No need for preprocessor").click() + elif preprocessor == "any": + preprocessor_section = page.get_by_text("Select preprocessor").locator("..").locator("..") + preprocessor_section.locator("label").nth(1).click() + else: + page.get_by_text(preprocessor, exact=True).click() + if click_next: + page.get_by_role("button", name="Next").click() + + +def select_language(page: Page, language: str = None): + if language: + logger.info(f"Select language: {language}") + page.get_by_role("textbox", name="Please select").click() + page.get_by_text(language, exact=True).click() + 
page.get_by_role("button", name="Next").click() + + +def fill_job_and_start(page: Page, jobs_client, job_name: str, pipeline_manager=None): + job_name = job_name if job_name else f"test_job_{uuid.uuid4().hex[:8]}" + logger.info(f"Fill job name: {job_name}") + page.get_by_role("textbox", name="Job name").fill(job_name) + + if pipeline_manager: + logger.info(f"Select pipeline manager: {pipeline_manager}") + page.get_by_text(pipeline_manager).nth(1).click() + + logger.info("Select pipeline dropdown") + page.get_by_role("textbox", name="Select pipeline").click() + page.get_by_text("print", exact=True).click() + + logger.info("Start extraction") + + page.get_by_role("button", name="Start Extraction").click() + page.wait_for_url("**/jobs/**", timeout=20000) + jobs = jobs_client.search_jobs() + job_id = next((j["id"] for j in jobs["data"] if j["name"] == job_name), None) + assert job_id, f"Job with name {job_name} not found!" + jobs_client.poll_until_finished(job_id, timeout_seconds=180) + page.reload() + expect(page.get_by_text("Finished")).to_be_visible(timeout=10000) + + +def select_human_in_the_loop_and_start( + page: Page, + jobs_client, + job_name: str, + validation_type: str = "Cross validation", + day: str | None = None, + annotator: str = "admin", + categories: list[str] = ["Age"], +): + logger.info("Select Human in the loop") + page.get_by_role("tab", name="Human in the Loop").click() + + logger.info("Select validation type") + page.get_by_role("textbox", name="Select validation type").click() + page.get_by_text(validation_type, exact=True).click() + + page.get_by_role("textbox", name="DD/MM/YYYY").click() + if not day: + day = datetime.datetime.today().day + 1 + logger.info(f"Select date {day}") + page.get_by_text(str(day), exact=True).click() + + logger.info("Select annotator") + page.get_by_role("textbox", name="Select Annotators and").click() + page.get_by_role("listbox").get_by_text(annotator).click(force=True) + 
page.locator(".uui-input-box.-clickable.uui-focus").click() + + page.get_by_role("textbox", name="Select categories").click() + for category in categories: + logger.info(f"Select category: {category}") + page.get_by_text(category, exact=True).click() + + logger.info("Distribute annotation tasks") + page.get_by_text("Distribute annotation tasks").click() + + fill_job_and_start(page, jobs_client, job_name) + + +def prepare_files(page: Page, file_tracker, frontend_file_helper: FrontendFileHelper, tmp_path, num_files, client): + logger.info(f"Prepare {num_files} temp files") + temp_files = frontend_file_helper.prepare_temp_files(tmp_path, num_files=num_files) + files = frontend_file_helper.upload_files(page, temp_files, file_tracker=file_tracker, client=client) + return files + + +def select_files(page: Page, document_names): + logger.info(f"Select files: {document_names}") + for file_name in document_names: + row = page.locator(f"text={file_name}").first + checkbox_label = row.locator("xpath=preceding-sibling::label") + checkbox_label.click(force=True) + + page.get_by_role("button", name="Next").click() + + +def run_upload_workflow( + page: Page, + frontend_file_helper: FrontendFileHelper, + num_files: int, + file_tracker, + client, + jobs_client, + dataset_type: str = "none", # "none", "existing", "new" + dataset_name: str = None, + tmp_path=None, + language: str = None, + preprocessor: str = None, + job_name: str = None, + human_in_loop: bool = False, + pipeline_manager: str = None, +): + logger.info("Open wizard") + page.get_by_role("button", name="Upload Wizard").click() + + prepare_files(page, file_tracker, frontend_file_helper, tmp_path, num_files, client) + + select_dataset(page, dataset_type, dataset_name) + + select_preprocessor(page, preprocessor=preprocessor, click_next=not language) + + select_language(page, language) + + if human_in_loop: + select_human_in_the_loop_and_start(page, jobs_client, job_name) + else: + fill_job_and_start(page, jobs_client, 
job_name=job_name, pipeline_manager=pipeline_manager) + + +def run_new_job_workflow( + page: Page, + num_files: int, + file_tracker, + jobs_client, + tmp_path=None, + job_name: str = None, + human_in_loop: bool = False, + pipeline_manager: str = None, +): + for i in range(num_files): + created_files, client = file_tracker + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + assert file_info["status"] is True + files = [file["file_name"] for file in created_files] + + logger.info("Open wizard") + page.get_by_role("button", name="New job").click() + + select_files(page, files) + + if human_in_loop: + select_human_in_the_loop_and_start(page, jobs_client, job_name) + else: + fill_job_and_start(page, jobs_client, job_name=job_name, pipeline_manager=pipeline_manager) diff --git a/test_automation_framework/tests/test_datasets.py b/test_automation_framework/tests/test_datasets.py index c730e511d..f81e2efe2 100644 --- a/test_automation_framework/tests/test_datasets.py +++ b/test_automation_framework/tests/test_datasets.py @@ -1,5 +1,6 @@ from logging import getLogger import uuid +from playwright.sync_api import Page, expect import pytest @@ -110,3 +111,139 @@ def test_search_multiple_existing_datasets(self, dataset_tracker): search_resp = client.search(filters=[{"field": "name", "operator": "in", "value": names}]) found_names = {d["name"] for d in search_resp["data"]} assert set(names) <= found_names + + +class TestDatasetsFrontend: + def test_delete_dataset(self, logged_in_page: Page, dataset_tracker): + page = logged_in_page + + created, client = dataset_tracker + + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + create_resp = client.create_dataset(name=dataset_name) + assert "detail" in create_resp + assert "successfully created" in create_resp["detail"].lower() + search_resp = client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}]) + assert any(d["name"] == dataset_name for d in search_resp["data"]) + + 
created.append(dataset_name) + + row = page.locator(f"div[role='none']:has-text('{dataset_name}')") + expect(row).to_be_visible(timeout=10000) + + delete_button = row.locator("button", has=page.locator("svg")).last + delete_button.click(force=True) + + expect(row).not_to_be_visible(timeout=10000) + + search_after = client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}]) + assert all(d["name"] != dataset_name for d in search_after["data"]) + + @pytest.mark.parametrize( + "flow", + [ + {"after_cancel": "Discard"}, + {"after_cancel": "Save"}, + {"save": True, "after_cancel": None}, + ], + ) + def test_create_dataset_cancel(self, logged_in_page: Page, flow, dataset_tracker): + page = logged_in_page + created, client = dataset_tracker + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + items = page.locator("a[class^='document-card-view-item_card-item']") + expect(items.first).to_be_visible(timeout=10000) + + page.get_by_role("button", name="Add new dataset").click() + dataset_modal = page.locator("div[role='modal']", has_text="Add dataset") + dataset_modal.wait_for(state="visible", timeout=5000) + page.get_by_role("textbox", name="Name").fill(dataset_name) + + if flow.get("save"): + dataset_modal.get_by_role("button", name="Save").click(force=True) + created.append(dataset_name) + dataset_modal.wait_for(state="detached", timeout=5000) + return + + dataset_modal.get_by_role("button", name="Cancel").click() + + if flow.get("after_cancel"): + page.wait_for_selector( + "div[role='modal']:has-text('Your data may be lost. Do you want to save data?')", + state="visible", + timeout=5000, + ) + + confirm_modal = page.locator("div[role='modal']").filter( + has_text="Your data may be lost. Do you want to save data?" 
+ ) + if flow["after_cancel"] == "Save": + created.append(dataset_name) + + confirm_modal.get_by_role("button", name=flow["after_cancel"]).click(force=True) + + confirm_modal.wait_for(state="detached", timeout=5000) + dataset_modal.wait_for(state="detached", timeout=5000) + + expect(page.get_by_role("textbox", name="Name")).not_to_be_visible(timeout=5000) + + def test_create_dataset_no_name(self, logged_in_page: Page): + page = logged_in_page + items = page.locator("a[class^='document-card-view-item_card-item']") + expect(items.first).to_be_visible(timeout=10000) + + page.get_by_role("button", name="Add new dataset").click() + dataset_modal = page.locator("div[role='modal']", has_text="Add dataset") + dataset_modal.wait_for(state="visible", timeout=5000) + + dataset_modal.get_by_role("button", name="Save").click(force=True) + error_message = dataset_modal.locator("div[role='alert'].uui-invalid-message") + expect(error_message).to_have_text("The field is mandatory", timeout=5000) + + def test_create_existing_dataset(self, logged_in_page: Page, dataset_tracker): + page = logged_in_page + created_datasets, client = dataset_tracker + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + resp = client.create_dataset(name=dataset_name) + created_datasets.append(dataset_name) + assert "successfully created" in resp["detail"].lower() + + document_items = page.locator("a[class^='document-card-view-item_card-item']") + expect(document_items.first).to_be_visible(timeout=10000) + + page.get_by_role("button", name="Add new dataset").click() + dataset_modal = page.locator("div[role='modal']", has_text="Add dataset") + dataset_modal.wait_for(state="visible", timeout=5000) + page.get_by_role("textbox", name="Name").fill(dataset_name) + dataset_modal.get_by_role("button", name="Save").click(force=True) + + expect(page.locator(f"text=Dataset {dataset_name} already exists!")).to_be_visible(timeout=30000) + + @pytest.mark.parametrize("select_all", [True, False]) + def 
test_add_to_dataset_empty_field(self, logged_in_page: Page, select_all: bool): + page = logged_in_page + + page.locator("rect").nth(0).click(force=True) + + if select_all: + page.locator("label:has-text('Select All') div").first.click(force=True) + else: + item = page.locator("a[class^='document-card-view-item_card-item']").first + item.scroll_into_view_if_needed() + + input_el = item.locator("input[type='checkbox']").first + label = item.locator("label.uui-checkbox-container") + uui_div = item.locator("div.uui-checkbox") + + click_target = label.first if label.count() else uui_div.first if uui_div.count() else input_el + click_target.click(force=True) + expect(input_el).to_be_checked() + + add_button = page.get_by_role("button", name="Add to dataset") + add_button.click() + + choose_button = page.get_by_role("button", name="Choose") + choose_button.click() + + error_label = page.locator("div.uui-invalid-message[role='alert']") + expect(error_label).to_have_text("The field is mandatory") diff --git a/test_automation_framework/tests/test_frontend.py b/test_automation_framework/tests/test_documents.py similarity index 58% rename from test_automation_framework/tests/test_frontend.py rename to test_automation_framework/tests/test_documents.py index 88a6fc9df..3fefa3e37 100644 --- a/test_automation_framework/tests/test_frontend.py +++ b/test_automation_framework/tests/test_documents.py @@ -1,132 +1,237 @@ +from logging import getLogger +from datetime import datetime + import pytest from playwright.sync_api import Page, expect import uuid -class Locators: - list_view_button = ("rect:nth-child(3)",) - icon_view_button = ("rect:nth-child(6)",) - - -class TestIconViewSelection: - @pytest.mark.parametrize("rect_index", [0, 1]) - def test_select_all_unselect_all_both_views(self, logged_in_page, rect_index): - page = logged_in_page - page.locator("rect").nth(rect_index).click(force=True) - select_all = page.locator("label:has-text('Select All') div").first - 
select_all.click(force=True) - file_inputs = page.locator("div.uui-checkbox > input[type='checkbox']") - for i in range(file_inputs.count()): - expect(file_inputs.nth(i)).to_be_checked() - select_all = page.locator("label:has-text('selected') div").first - select_all.click(force=True) - file_inputs = page.locator("div.uui-checkbox > input[type='checkbox']") - for i in range(file_inputs.count()): - expect(file_inputs.nth(i)).not_to_be_checked() - - @pytest.mark.parametrize( - "rect_index, file_locator", - [ - (0, "a[class^='document-card-view-item_card-item']"), - (1, "div[role='cell']"), - ], - ) - def test_view_switch(self, logged_in_page: Page, rect_index: int, file_locator: str): - page = logged_in_page - page.locator("rect").nth(rect_index).click(force=True) - expect(page.locator(file_locator).first).to_be_visible() - - @pytest.mark.parametrize("action", ["select", "unselect"]) - def test_select_unselect_one_by_one_icon_view(self, logged_in_page: Page, action: str): - page = logged_in_page - - page.locator("rect").nth(0).click(force=True) - - items = page.locator("a[class^='document-card-view-item_card-item']") - expect(items.first).to_be_visible(timeout=10000) - - inputs = items.locator("input[type='checkbox']") +from helpers.base_client.base_client import HTTPError - if action == "unselect": - page.locator("label:has-text('Select All') div").first.click(force=True) - expect(inputs.first).to_be_checked(timeout=5000) +logger = getLogger(__name__) - count = inputs.count() - for i in range(count): - row = items.nth(i) - row.scroll_into_view_if_needed() - input_el = row.locator("input[type='checkbox']").first - label = row.locator("label.uui-checkbox-container") - uui_div = row.locator("div.uui-checkbox") - - click_target = label.first if label.count() else uui_div.first if uui_div.count() else input_el - click_target.click(force=True) - - if action == "select": - expect(input_el).to_be_checked() - else: - expect(input_el).not_to_be_checked() - - 
@pytest.mark.parametrize("action", ["select", "unselect"]) - def test_select_unselect_one_by_one_list_view(self, logged_in_page: Page, action: str): - page = logged_in_page - page.locator("rect").nth(1).click(force=True) - - rows = page.locator("div.uui-table-row-container[role='row']") - expect(rows.first).to_be_visible(timeout=5000) - - count = rows.count() - assert count > 0, "no list rows found" - - if action == "unselect": - page.locator("label:has-text('Select All') div").first.click(force=True) - expect(page.locator("div.uui-checkbox > input[type='checkbox']").first).to_be_checked(timeout=5000) - - checkboxes = page.locator("div.uui-checkbox") - count = checkboxes.count() - for i in range(2, count): - cb = checkboxes.nth(i) - cb.scroll_into_view_if_needed() - cb.click(force=True) - - if action == "select": - expect(cb).to_be_checked() - else: - expect(cb).not_to_be_checked() - - @pytest.mark.parametrize("select_all", [True, False]) - def test_add_to_dataset_empty_field(self, logged_in_page: Page, select_all: bool): - page = logged_in_page - - page.locator("rect").nth(0).click(force=True) - - if select_all: - page.locator("label:has-text('Select All') div").first.click(force=True) - else: - item = page.locator("a[class^='document-card-view-item_card-item']").first - item.scroll_into_view_if_needed() +class Locators: + list_view_button = ("rect:nth-child(3)",) + icon_view_button = ("rect:nth-child(6)",) - input_el = item.locator("input[type='checkbox']").first - label = item.locator("label.uui-checkbox-container") - uui_div = item.locator("div.uui-checkbox") - click_target = label.first if label.count() else uui_div.first if uui_div.count() else input_el - click_target.click(force=True) - expect(input_el).to_be_checked() +class TestDocumentsAPI: + def test_upload_and_delete_file(self, file_tracker, tmp_path): + created_files, client = file_tracker + try: + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + assert 
file_info["status"] is True + assert "id" in file_info + assert "file_name" in file_info + created_files.append(file_info) + search = client.search_files() + ids = [f["id"] for f in search["data"]] + assert file_info["id"] in ids + delete_result = client.delete_files([file_info["id"]]) + assert delete_result[0]["status"] is True + assert delete_result[0]["action"] == "delete" + search_after = client.search_files() + ids_after = [f["id"] for f in search_after["data"]] + assert file_info["id"] not in ids_after + created_files.clear() + finally: + if temp_file.exists(): + temp_file.unlink() + + @pytest.mark.skip(reason="Returns 500 instead of 4xx") + def test_upload_invalid_format(self, file_client, tmp_path): + invalid_file = tmp_path / f"{uuid.uuid4().hex}.py" + invalid_file.write_text("this is py file") + + with pytest.raises(HTTPError) as exc: + file_client.upload_file(str(invalid_file)) + + assert exc.value.status_code == 400 + + @pytest.mark.skip(reason="Uploads a file, but returns 500") + @pytest.mark.parametrize("content", ["", " "]) + def test_upload_empty_file(self, file_client, tmp_path, content): + empty_file = tmp_path / f"{uuid.uuid4().hex}_empty.pdf" + empty_file.write_text(content) + with pytest.raises(HTTPError) as exc: + file_client.upload_file(str(empty_file)) + assert exc.value.status_code == 400 + + def test_move_file(self, file_tracker, dataset_tracker, tmp_path): + created_datasets, dataset_client = dataset_tracker + + first_dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + second_dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + + first_resp = dataset_client.create_dataset(name=first_dataset_name) + created_datasets.append(first_dataset_name) + assert "successfully created" in first_resp["detail"].lower() + first_dataset_id = dataset_client.search( + filters=[{"field": "name", "operator": "eq", "value": first_dataset_name}] + )["data"][0]["id"] + + second_resp = dataset_client.create_dataset(name=second_dataset_name) + 
created_datasets.append(second_dataset_name) + assert "successfully created" in second_resp["detail"].lower() + second_dataset_id = dataset_client.search( + filters=[{"field": "name", "operator": "eq", "value": second_dataset_name}] + )["data"][0]["id"] - add_button = page.get_by_role("button", name="Add to dataset") - add_button.click() + created_files, client = file_tracker + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + created_files.append(file_info) + file_id = file_info["id"] + try: + move1 = client.move_files(name=first_dataset_name, objects=[file_id])[0] + assert move1["status"] is True + assert "successfully bounded" in move1["message"].lower() + files_in_first = dataset_client.search_files(dataset_id=first_dataset_id)["data"] + assert any(f["id"] == file_id for f in files_in_first) + move2 = client.move_files(name=second_dataset_name, objects=[file_id])[0] + assert move2["status"] is True + assert "successfully bounded" in move2["message"].lower() + files_in_second = dataset_client.search_files(dataset_id=second_dataset_id)["data"] + assert any(f["id"] == file_id for f in files_in_second) + finally: + if temp_file.exists(): + temp_file.unlink() + + def test_add_file_to_dataset_twice(self, file_tracker, dataset_tracker, tmp_path): + created_datasets, dataset_client = dataset_tracker + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + dataset = dataset_client.create_dataset(name=dataset_name) + created_datasets.append(dataset_name) + assert "successfully created" in dataset["detail"].lower() + first_dataset_id = dataset_client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}])[ + "data" + ][0]["id"] - choose_button = page.get_by_role("button", name="Choose") - choose_button.click() + created_files, client = file_tracker + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + created_files.append(file_info) + file_id = file_info["id"] + try: + move1 = 
client.move_files(name=dataset_name, objects=[file_id])[0] + assert move1["status"] is True + assert "successfully bounded" in move1["message"].lower() + files_in_first = dataset_client.search_files(dataset_id=first_dataset_id)["data"] + assert any(f["id"] == file_id for f in files_in_first) + move2 = client.move_files(name=dataset_name, objects=[file_id])[0] + assert move2["status"] is False + assert "already bounded" in move2["message"].lower() + finally: + if temp_file.exists(): + temp_file.unlink() + + def test_clear_search_files(self, file_tracker, tmp_path): + created_files, client = file_tracker + result = client.search_files() + assert "pagination" in result + assert "data" in result + assert isinstance(result["data"], list) + pagination = result["pagination"] + required_pagination_keys = {"page_num", "page_offset", "page_size", "min_pages_left", "total", "has_more"} + assert required_pagination_keys <= pagination.keys() + for file in result["data"]: + required_file_keys = { + "id", + "original_name", + "bucket", + "size_in_bytes", + "extension", + "original_ext", + "content_type", + "pages", + "last_modified", + "status", + "path", + "datasets", + } + assert required_file_keys <= file.keys() + assert isinstance(file["id"], int) + assert isinstance(file["original_name"], str) + assert isinstance(file["size_in_bytes"], int) + + def test_search_existing_file(self, file_tracker, tmp_path): + created_files, client = file_tracker + try: + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + assert file_info["status"] is True + search_resp = client.search_files( + filters=[{"field": "original_name", "operator": "eq", "value": file_info["file_name"]}] + ) + names = [f["original_name"] for f in search_resp["data"]] + assert file_info["file_name"] in names + finally: + if temp_file.exists(): + temp_file.unlink() + + def test_search_non_existing_file(self, file_client): + search_resp = file_client.search_files( + filters=[{"field": 
"original_name", "operator": "eq", "value": "definitely_not_a_file.pdf"}] + ) + assert search_resp["data"] == [] + + def test_search_multiple_existing_files(self, file_tracker, tmp_path): + created_files, client = file_tracker + f1, t1 = client.upload_temp_file(client, file_tracker, tmp_path) + f2, t2 = client.upload_temp_file(client, file_tracker, tmp_path) + names = [f1["file_name"], f2["file_name"]] - error_label = page.locator("div.uui-invalid-message[role='alert']") - expect(error_label).to_have_text("The field is mandatory") + search = client.search_files(filters=[{"field": "original_name", "operator": "in", "value": names}]) + found_names = {f["original_name"] for f in search["data"]} + assert set(names) <= found_names + t1.unlink(missing_ok=True) + t2.unlink(missing_ok=True) -class TestControls: - def test_scroll_documents(self, logged_in_page: Page): + def test_download_existing_file(self, file_tracker, tmp_path): + created_files, client = file_tracker + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + file_id = file_info["id"] + + content = client.download_file(file_id) + assert isinstance(content, (bytes, bytearray)) + assert len(content) > 100 + assert content.startswith(b"%PDF") + + temp_file.unlink(missing_ok=True) + + def test_download_nonexistent_file(self, file_client): + with pytest.raises(HTTPError) as exc: + file_client.download_file(9999999) + assert exc.value.status_code == 404 + + @pytest.mark.parametrize("field", ["original_name", "last_modified", "size_in_bytes"]) + @pytest.mark.parametrize("direction", ["asc", "desc"]) + # name descending fails + def test_files_sorting(self, file_client, field, direction): + resp = file_client.post_json( + "/assets/files/search", + json={ + "pagination": {"page_num": 1, "page_size": 15}, + "filters": [{"field": "original_name", "operator": "ilike", "value": "%%"}], + "sorting": [{"direction": direction, "field": field}], + }, + 
headers=file_client._default_headers(content_type_json=True), + ) + + data = resp["data"] + values = [d[field] for d in data if field in d] + + if field == "last_modified": + values = [datetime.fromisoformat(v) for v in values] + + if field == "size_in_bytes": + values = [int(v) for v in values] + + expected = sorted(values, reverse=(direction == "desc")) + assert values == expected, f"{field} not sorted {direction}" + + +class TestDocumentsFrontend: + def test_documents_scroll(self, logged_in_page: Page): page = logged_in_page last_doc = page.locator('a[class*="document-card-view-item_card-item"]').last @@ -137,7 +242,7 @@ def test_scroll_documents(self, logged_in_page: Page): first_doc.scroll_into_view_if_needed() expect(first_doc).to_be_visible() - def test_pagination_by_page_number(self, logged_in_page: Page): + def test_documents_pagination_by_page_number(self, logged_in_page: Page): page = logged_in_page nav = page.locator('nav[role="navigation"]') @@ -158,7 +263,7 @@ def test_pagination_by_page_number(self, logged_in_page: Page): active_attr = nav.get_by_role("button", name="2").get_attribute("aria-current") assert active_attr == "true" or page.locator(list_selector).first.text_content() != old_text - def test_pagination_by_arrows(self, logged_in_page: Page): + def test_documents_pagination_by_arrows(self, logged_in_page: Page): page = logged_in_page nav = page.locator('nav[role="navigation"]') @@ -189,7 +294,7 @@ def test_pagination_by_arrows(self, logged_in_page: Page): active_attr_1 = nav.get_by_role("button", name="1", exact=True).get_attribute("aria-current") assert active_attr_1 == "true" or page.locator(list_selector).first.text_content() != old_text_back - def test_show_on_page(self, logged_in_page: Page): + def test_documents_show_on_page(self, logged_in_page: Page): page = logged_in_page list_selector = 'a[class*="document-card-view-item_card-item"]' @@ -374,107 +479,88 @@ def test_delete_files(self, logged_in_page: Page, file_tracker, tmp_path, 
num_fi if temp.exists(): temp.unlink() - def test_delete_dataset(self, logged_in_page: Page, dataset_tracker): + @pytest.mark.parametrize("rect_index", [0, 1]) + def test_select_all_unselect_all_both_views(self, logged_in_page, rect_index): page = logged_in_page - - created, client = dataset_tracker - - dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" - create_resp = client.create_dataset(name=dataset_name) - assert "detail" in create_resp - assert "successfully created" in create_resp["detail"].lower() - search_resp = client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}]) - assert any(d["name"] == dataset_name for d in search_resp["data"]) - - created.append(dataset_name) - - row = page.locator(f"div[role='none']:has-text('{dataset_name}')") - expect(row).to_be_visible(timeout=10000) - - delete_button = row.locator("button", has=page.locator("svg")).last - delete_button.click(force=True) - - expect(row).not_to_be_visible(timeout=10000) - - search_after = client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}]) - assert all(d["name"] != dataset_name for d in search_after["data"]) + page.locator("rect").nth(rect_index).click(force=True) + select_all = page.locator("label:has-text('Select All') div").first + select_all.click(force=True) + file_inputs = page.locator("div.uui-checkbox > input[type='checkbox']") + for i in range(file_inputs.count()): + expect(file_inputs.nth(i)).to_be_checked() + select_all = page.locator("label:has-text('selected') div").first + select_all.click(force=True) + file_inputs = page.locator("div.uui-checkbox > input[type='checkbox']") + for i in range(file_inputs.count()): + expect(file_inputs.nth(i)).not_to_be_checked() @pytest.mark.parametrize( - "flow", + "rect_index, file_locator", [ - {"after_cancel": "Discard"}, - {"after_cancel": "Save"}, - {"save": True, "after_cancel": None}, + (0, "a[class^='document-card-view-item_card-item']"), + (1, "div[role='cell']"), ], ) - def 
test_create_dataset_cancel(self, logged_in_page: Page, flow, dataset_tracker): + def test_view_switch(self, logged_in_page: Page, rect_index: int, file_locator: str): page = logged_in_page - created, client = dataset_tracker - dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" - items = page.locator("a[class^='document-card-view-item_card-item']") - expect(items.first).to_be_visible(timeout=10000) + page.locator("rect").nth(rect_index).click(force=True) + expect(page.locator(file_locator).first).to_be_visible() - page.get_by_role("button", name="Add new dataset").click() - dataset_modal = page.locator("div[role='modal']", has_text="Add dataset") - dataset_modal.wait_for(state="visible", timeout=5000) - page.get_by_role("textbox", name="Name").fill(dataset_name) + @pytest.mark.parametrize("action", ["select", "unselect"]) + def test_select_unselect_one_by_one_icon_view(self, logged_in_page: Page, action: str): + page = logged_in_page - if flow.get("save"): - dataset_modal.get_by_role("button", name="Save").click(force=True) - created.append(dataset_name) - dataset_modal.wait_for(state="detached", timeout=5000) - return + page.locator("rect").nth(0).click(force=True) - dataset_modal.get_by_role("button", name="Cancel").click() + items = page.locator("a[class^='document-card-view-item_card-item']") + expect(items.first).to_be_visible(timeout=10000) - if flow.get("after_cancel"): - page.wait_for_selector( - "div[role='modal']:has-text('Your data may be lost. Do you want to save data?')", - state="visible", - timeout=5000, - ) + inputs = items.locator("input[type='checkbox']") - confirm_modal = page.locator("div[role='modal']").filter( - has_text="Your data may be lost. Do you want to save data?" 
- ) - if flow["after_cancel"] == "Save": - created.append(dataset_name) + if action == "unselect": + page.locator("label:has-text('Select All') div").first.click(force=True) + expect(inputs.first).to_be_checked(timeout=5000) - confirm_modal.get_by_role("button", name=flow["after_cancel"]).click(force=True) + count = inputs.count() + for i in range(count): + row = items.nth(i) + row.scroll_into_view_if_needed() - confirm_modal.wait_for(state="detached", timeout=5000) - dataset_modal.wait_for(state="detached", timeout=5000) + input_el = row.locator("input[type='checkbox']").first + label = row.locator("label.uui-checkbox-container") + uui_div = row.locator("div.uui-checkbox") - expect(page.get_by_role("textbox", name="Name")).not_to_be_visible(timeout=5000) + click_target = label.first if label.count() else uui_div.first if uui_div.count() else input_el + click_target.click(force=True) - def test_create_dataset_no_name(self, logged_in_page: Page): - page = logged_in_page - items = page.locator("a[class^='document-card-view-item_card-item']") - expect(items.first).to_be_visible(timeout=10000) + if action == "select": + expect(input_el).to_be_checked() + else: + expect(input_el).not_to_be_checked() - page.get_by_role("button", name="Add new dataset").click() - dataset_modal = page.locator("div[role='modal']", has_text="Add dataset") - dataset_modal.wait_for(state="visible", timeout=5000) + @pytest.mark.parametrize("action", ["select", "unselect"]) + def test_select_unselect_one_by_one_list_view(self, logged_in_page: Page, action: str): + page = logged_in_page + page.locator("rect").nth(1).click(force=True) - dataset_modal.get_by_role("button", name="Save").click(force=True) - error_message = dataset_modal.locator("div[role='alert'].uui-invalid-message") - expect(error_message).to_have_text("The field is mandatory", timeout=5000) + rows = page.locator("div.uui-table-row-container[role='row']") + expect(rows.first).to_be_visible(timeout=5000) - def 
test_create_existing_dataset(self, logged_in_page: Page, dataset_tracker): - page = logged_in_page - created_datasets, client = dataset_tracker - dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" - resp = client.create_dataset(name=dataset_name) - created_datasets.append(dataset_name) - assert "successfully created" in resp["detail"].lower() + count = rows.count() + assert count > 0, "no list rows found" - document_items = page.locator("a[class^='document-card-view-item_card-item']") - expect(document_items.first).to_be_visible(timeout=10000) + if action == "unselect": + page.locator("label:has-text('Select All') div").first.click(force=True) + expect(page.locator("div.uui-checkbox > input[type='checkbox']").first).to_be_checked(timeout=5000) - page.get_by_role("button", name="Add new dataset").click() - dataset_modal = page.locator("div[role='modal']", has_text="Add dataset") - dataset_modal.wait_for(state="visible", timeout=5000) - page.get_by_role("textbox", name="Name").fill(dataset_name) - dataset_modal.get_by_role("button", name="Save").click(force=True) + checkboxes = page.locator("div.uui-checkbox") + count = checkboxes.count() + for i in range(2, count): + cb = checkboxes.nth(i) + cb.scroll_into_view_if_needed() + cb.click(force=True) - expect(page.locator(f"text=Dataset {dataset_name} already exists!")).to_be_visible(timeout=30000) + if action == "select": + expect(cb).to_be_checked() + else: + expect(cb).not_to_be_checked() diff --git a/test_automation_framework/tests/test_files.py b/test_automation_framework/tests/test_files.py deleted file mode 100644 index 2d654dccf..000000000 --- a/test_automation_framework/tests/test_files.py +++ /dev/null @@ -1,224 +0,0 @@ -from logging import getLogger -from datetime import datetime -import uuid - -import pytest - - -from helpers.base_client.base_client import HTTPError - -logger = getLogger(__name__) - - -class TestFiles: - def test_upload_and_delete_file(self, file_tracker, tmp_path): - created_files, client = 
file_tracker - try: - file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) - assert file_info["status"] is True - assert "id" in file_info - assert "file_name" in file_info - created_files.append(file_info) - search = client.search_files() - ids = [f["id"] for f in search["data"]] - assert file_info["id"] in ids - delete_result = client.delete_files([file_info["id"]]) - assert delete_result[0]["status"] is True - assert delete_result[0]["action"] == "delete" - search_after = client.search_files() - ids_after = [f["id"] for f in search_after["data"]] - assert file_info["id"] not in ids_after - created_files.clear() - finally: - if temp_file.exists(): - temp_file.unlink() - - @pytest.mark.skip(reason="Returns 500 instead of 4xx") - def test_upload_invalid_format(self, file_client, tmp_path): - invalid_file = tmp_path / f"{uuid.uuid4().hex}.py" - invalid_file.write_text("this is py file") - - with pytest.raises(HTTPError) as exc: - file_client.upload_file(str(invalid_file)) - - assert exc.value.status_code == 400 - - @pytest.mark.skip(reason="Uploads a file, but returns 500") - @pytest.mark.parametrize("content", ["", " "]) - def test_upload_empty_file(self, file_client, tmp_path, content): - empty_file = tmp_path / f"{uuid.uuid4().hex}_empty.pdf" - empty_file.write_text(content) - with pytest.raises(HTTPError) as exc: - file_client.upload_file(str(empty_file)) - assert exc.value.status_code == 400 - - def test_move_file(self, file_tracker, dataset_tracker, tmp_path): - created_datasets, dataset_client = dataset_tracker - - first_dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" - second_dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" - - first_resp = dataset_client.create_dataset(name=first_dataset_name) - created_datasets.append(first_dataset_name) - assert "successfully created" in first_resp["detail"].lower() - first_dataset_id = dataset_client.search( - filters=[{"field": "name", "operator": "eq", "value": first_dataset_name}] - 
)["data"][0]["id"] - - second_resp = dataset_client.create_dataset(name=second_dataset_name) - created_datasets.append(second_dataset_name) - assert "successfully created" in second_resp["detail"].lower() - second_dataset_id = dataset_client.search( - filters=[{"field": "name", "operator": "eq", "value": second_dataset_name}] - )["data"][0]["id"] - - created_files, client = file_tracker - file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) - created_files.append(file_info) - file_id = file_info["id"] - try: - move1 = client.move_files(name=first_dataset_name, objects=[file_id])[0] - assert move1["status"] is True - assert "successfully bounded" in move1["message"].lower() - files_in_first = dataset_client.search_files(dataset_id=first_dataset_id)["data"] - assert any(f["id"] == file_id for f in files_in_first) - move2 = client.move_files(name=second_dataset_name, objects=[file_id])[0] - assert move2["status"] is True - assert "successfully bounded" in move2["message"].lower() - files_in_second = dataset_client.search_files(dataset_id=second_dataset_id)["data"] - assert any(f["id"] == file_id for f in files_in_second) - finally: - if temp_file.exists(): - temp_file.unlink() - - def test_add_file_to_dataset_twice(self, file_tracker, dataset_tracker, tmp_path): - created_datasets, dataset_client = dataset_tracker - dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" - dataset = dataset_client.create_dataset(name=dataset_name) - created_datasets.append(dataset_name) - assert "successfully created" in dataset["detail"].lower() - first_dataset_id = dataset_client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}])[ - "data" - ][0]["id"] - - created_files, client = file_tracker - file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) - created_files.append(file_info) - file_id = file_info["id"] - try: - move1 = client.move_files(name=dataset_name, objects=[file_id])[0] - assert move1["status"] is 
True - assert "successfully bounded" in move1["message"].lower() - files_in_first = dataset_client.search_files(dataset_id=first_dataset_id)["data"] - assert any(f["id"] == file_id for f in files_in_first) - move2 = client.move_files(name=dataset_name, objects=[file_id])[0] - assert move2["status"] is False - assert "already bounded" in move2["message"].lower() - finally: - if temp_file.exists(): - temp_file.unlink() - - def test_clear_search_files(self, file_tracker, tmp_path): - created_files, client = file_tracker - result = client.search_files() - assert "pagination" in result - assert "data" in result - assert isinstance(result["data"], list) - pagination = result["pagination"] - required_pagination_keys = {"page_num", "page_offset", "page_size", "min_pages_left", "total", "has_more"} - assert required_pagination_keys <= pagination.keys() - for file in result["data"]: - required_file_keys = { - "id", - "original_name", - "bucket", - "size_in_bytes", - "extension", - "original_ext", - "content_type", - "pages", - "last_modified", - "status", - "path", - "datasets", - } - assert required_file_keys <= file.keys() - assert isinstance(file["id"], int) - assert isinstance(file["original_name"], str) - assert isinstance(file["size_in_bytes"], int) - - def test_search_existing_file(self, file_tracker, tmp_path): - created_files, client = file_tracker - try: - file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) - assert file_info["status"] is True - search_resp = client.search_files( - filters=[{"field": "original_name", "operator": "eq", "value": file_info["file_name"]}] - ) - names = [f["original_name"] for f in search_resp["data"]] - assert file_info["file_name"] in names - finally: - if temp_file.exists(): - temp_file.unlink() - - def test_search_non_existing_file(self, file_client): - search_resp = file_client.search_files( - filters=[{"field": "original_name", "operator": "eq", "value": "definitely_not_a_file.pdf"}] - ) - assert 
search_resp["data"] == [] - - def test_search_multiple_existing_files(self, file_tracker, tmp_path): - created_files, client = file_tracker - f1, t1 = client.upload_temp_file(client, file_tracker, tmp_path) - f2, t2 = client.upload_temp_file(client, file_tracker, tmp_path) - names = [f1["file_name"], f2["file_name"]] - - search = client.search_files(filters=[{"field": "original_name", "operator": "in", "value": names}]) - found_names = {f["original_name"] for f in search["data"]} - assert set(names) <= found_names - - t1.unlink(missing_ok=True) - t2.unlink(missing_ok=True) - - def test_download_existing_file(self, file_tracker, tmp_path): - created_files, client = file_tracker - file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) - file_id = file_info["id"] - - content = client.download_file(file_id) - assert isinstance(content, (bytes, bytearray)) - assert len(content) > 100 - assert content.startswith(b"%PDF") - - temp_file.unlink(missing_ok=True) - - def test_download_nonexistent_file(self, file_client): - with pytest.raises(HTTPError) as exc: - file_client.download_file(9999999) - assert exc.value.status_code == 404 - - @pytest.mark.parametrize("field", ["original_name", "last_modified", "size_in_bytes"]) - @pytest.mark.parametrize("direction", ["asc", "desc"]) - # name descending fails - def test_files_sorting(self, file_client, field, direction): - resp = file_client.post_json( - "/assets/files/search", - json={ - "pagination": {"page_num": 1, "page_size": 15}, - "filters": [{"field": "original_name", "operator": "ilike", "value": "%%"}], - "sorting": [{"direction": direction, "field": field}], - }, - headers=file_client._default_headers(content_type_json=True), - ) - - data = resp["data"] - values = [d[field] for d in data if field in d] - - if field == "last_modified": - values = [datetime.fromisoformat(v) for v in values] - - if field == "size_in_bytes": - values = [int(v) for v in values] - - expected = sorted(values, 
reverse=(direction == "desc")) - assert values == expected, f"{field} not sorted {direction}" diff --git a/test_automation_framework/tests/test_jobs.py b/test_automation_framework/tests/test_jobs.py index 03a8b559e..8e486a132 100644 --- a/test_automation_framework/tests/test_jobs.py +++ b/test_automation_framework/tests/test_jobs.py @@ -1,6 +1,8 @@ from logging import getLogger from datetime import datetime, timedelta import uuid +from playwright.sync_api import Page, expect +from helpers.steps.jobs_creation import run_new_job_workflow import pytest @@ -39,7 +41,7 @@ def test_create_and_poll_job( @pytest.mark.parametrize("field", ["name", "type", "status", "deadline", "creation_datetime"]) @pytest.mark.parametrize("direction", ["asc", "desc"]) # descending name sorting works weird - def test_sorting(self, jobs_client, field, direction): + def test_jobs_sorting(self, jobs_client, field, direction): resp = jobs_client.post_json( "/jobs/jobs/search", json={ @@ -97,7 +99,7 @@ def test_job_search(self, jobs_client, job_tracker, file_tracker, dataset_tracke assert job_id in job_ids @pytest.mark.parametrize("field", ["creation_datetime", "deadline"]) - def test_date_range_filter(self, jobs_client, field): + def test_jobs_date_range_filter(self, jobs_client, field): start = (datetime.utcnow() - timedelta(days=365)).replace(microsecond=0).isoformat() end = (datetime.utcnow() + timedelta(days=365)).replace(microsecond=0).isoformat() @@ -118,3 +120,113 @@ def test_date_range_filter(self, jobs_client, field): if field in job and job[field] is not None: date_val = datetime.fromisoformat(job[field]) assert datetime.fromisoformat(start) <= date_val <= datetime.fromisoformat(end) + + +class TestJobsFrontend: + def test_jobs_scroll(self, jobs_page: Page): + page = jobs_page + + page_size_container = page.locator("div:has(> div > span:has-text('Show on page'))") + page_size_input = page_size_container.locator("input[aria-haspopup='true']") + page_size_input.click() + 
page.locator("div[role='option']", has_text="100").click() + + rows = page.locator("div[role='row']").filter(has_not=page.locator("div[role='columnheader']")) + + last_row = rows.last + last_row.scroll_into_view_if_needed() + expect(last_row).to_be_visible() + + first_row = rows.first + first_row.scroll_into_view_if_needed() + expect(first_row).to_be_visible() + + def test_jobs_pagination_by_page_number(self, jobs_page: Page): + page = jobs_page + nav = page.locator('nav[role="navigation"]') + nav.wait_for(state="visible", timeout=10000) + + rows = page.locator("div[role='row']").filter(has_not=page.locator("div[role='columnheader']")) + first_row = rows.first + expect(first_row).to_be_visible(timeout=10000) + + old_text = first_row.text_content() + + nav.get_by_role("button", name="2", exact=True).click() + + try: + expect(nav.get_by_role("button", name="2")).to_have_attribute("aria-current", "true", timeout=10000) + except AssertionError: + expect(rows.first).not_to_have_text(old_text, timeout=10000) + + active_attr = nav.get_by_role("button", name="2").get_attribute("aria-current") + assert active_attr == "true" or rows.first.text_content() != old_text + + def test_jobs_pagination_by_arrows(self, jobs_page: Page): + page = jobs_page + + nav = page.locator('nav[role="navigation"]') + nav.wait_for(state="visible", timeout=10000) + + rows = page.locator("div[role='row']").filter(has_not=page.locator("div[role='columnheader']")) + first_row = rows.first + expect(first_row).to_be_visible(timeout=10000) + + old_text = first_row.text_content() + + nav.locator("button").last.click() + try: + expect(nav.get_by_role("button", name="2", exact=True)).to_have_attribute( + "aria-current", "true", timeout=10000 + ) + except AssertionError: + expect(rows.first).not_to_have_text(old_text, timeout=10000) + + old_text_back = rows.first.text_content() + nav.locator("button").first.click() + try: + expect(nav.get_by_role("button", name="1", exact=True)).to_have_attribute( + 
"aria-current", "true", timeout=10000 + ) + except AssertionError: + expect(rows.first).not_to_have_text(old_text_back, timeout=10000) + + active_attr_1 = nav.get_by_role("button", name="1", exact=True).get_attribute("aria-current") + assert active_attr_1 == "true" or rows.first.text_content() != old_text_back + + def test_jobs_show_on_page(self, jobs_page: Page): + page = jobs_page + + rows = page.locator("div[role='row']").locator("xpath=..").locator("div[role='row']:not(.uui-table-header-row)") + + page_size_container = page.locator("div:has(> div > span:has-text('Show on page'))") + page_size_input = page_size_container.locator("input[aria-haspopup='true']") + + page_size_input.click() + options = page.locator("div[role='option']") + option_texts = [options.nth(i).inner_text() for i in range(options.count())] + page_size_input.click() + + for value in option_texts: + page_size_input.click() + + option = page.locator("div[role='option']", has_text=value).first + option.wait_for(state="visible", timeout=5000) + option.click() + + expect(rows.first).to_be_visible(timeout=10000) + count = rows.count() + assert count <= int(value), f"Expected at most {value} rows, got {count}" + + @pytest.mark.parametrize("num_files", [1, 3]) + @pytest.mark.parametrize("manager", [None, "Airflow", "Databricks"]) + def test_create_job(self, jobs_page: Page, file_tracker, tmp_path, jobs_client, file_client, num_files, manager): + page = jobs_page + run_new_job_workflow( + page=page, + num_files=num_files, + file_tracker=file_tracker, + jobs_client=jobs_client, + tmp_path=tmp_path, + pipeline_manager=manager, + ) diff --git a/test_automation_framework/tests/test_upload_wizard.py b/test_automation_framework/tests/test_upload_wizard.py index 0482d129c..5cea3aab9 100644 --- a/test_automation_framework/tests/test_upload_wizard.py +++ b/test_automation_framework/tests/test_upload_wizard.py @@ -4,157 +4,28 @@ from helpers.files.file_client_frontend import FrontendFileHelper from logging 
import getLogger from pathlib import Path -import datetime - +from helpers.steps.jobs_creation import run_upload_workflow logger = getLogger(__name__) class TestUploadWizard: - @staticmethod - def select_dataset(page: Page, dataset_type: str, dataset_name: str = None): - logger.info(f"Select dataset option: {dataset_type}") - if dataset_type == "none": - page.locator("label:has-text('No') div").nth(1).click() - elif dataset_type == "existing": - page.locator("label:has-text('Existing dataset') div").nth(1).click() - page.locator(".uui-icon.uui-enabled.uui-icon-dropdown").click() - page.get_by_text(dataset_name, exact=True).click() - elif dataset_type == "new": - page.locator("label:has-text('New dataset') div").nth(1).click() - page.get_by_role("textbox", name="Dataset name").fill(dataset_name) - else: - raise ValueError(f"Unknown dataset_type: {dataset_type}") - page.get_by_role("button", name="Next").click() - - @staticmethod - def select_preprocessor(page: Page, preprocessor: str = None, click_next: bool = True) -> None: - logger.info(f"Select preprocessor: {preprocessor or 'No need'}") - if preprocessor is None: - page.get_by_text("No need for preprocessor").click() - elif preprocessor == "any": - preprocessor_section = page.get_by_text("Select preprocessor").locator("..").locator("..") - preprocessor_section.locator("label").nth(1).click() - else: - page.get_by_text(preprocessor, exact=True).click() - if click_next: - page.get_by_role("button", name="Next").click() - - @staticmethod - def select_language(page: Page, language: str = None): - if language: - logger.info(f"Select language: {language}") - page.get_by_role("textbox", name="Please select").click() - page.get_by_text(language, exact=True).click() - page.get_by_role("button", name="Next").click() - - @staticmethod - def fill_job_and_start(page: Page, jobs_client, job_name: str): - job_name = job_name if job_name else f"test_job_{uuid.uuid4().hex[:8]}" - logger.info(f"Fill job name: {job_name}") - 
page.get_by_role("textbox", name="Job name").fill(job_name) - - logger.info("Select pipeline dropdown") - page.get_by_role("textbox", name="Select pipeline").click() - page.get_by_text("print", exact=True).click() - - logger.info("Start extraction") - - page.get_by_role("button", name="Start Extraction").click() - page.wait_for_url("**/jobs/**", timeout=20000) - jobs = jobs_client.search_jobs() - job_id = next((j["id"] for j in jobs["data"] if j["name"] == job_name), None) - assert job_id, f"Job with name {job_name} not found!" - jobs_client.poll_until_finished(job_id, timeout_seconds=180) - page.reload() - expect(page.get_by_text("Finished")).to_be_visible(timeout=10000) - - def select_human_in_the_loop_and_start( - self, - page: Page, - jobs_client, - job_name: str, - validation_type: str = "Cross validation", - day: str | None = None, - annotator: str = "admin", - categories: list[str] = ["Age"], - ): - logger.info("Select Human in the loop") - page.get_by_role("tab", name="Human in the Loop").click() - - logger.info("Select validation type") - page.get_by_role("textbox", name="Select validation type").click() - page.get_by_text(validation_type, exact=True).click() - - page.get_by_role("textbox", name="DD/MM/YYYY").click() - if not day: - day = datetime.datetime.today().day + 1 - logger.info(f"Select date {day}") - page.get_by_text(str(day), exact=True).click() - - logger.info("Select annotator") - page.get_by_role("textbox", name="Select Annotators and").click() - page.get_by_role("listbox").get_by_text(annotator).click(force=True) - page.locator(".uui-input-box.-clickable.uui-focus").click() - - page.get_by_role("textbox", name="Select categories").click() - for category in categories: - logger.info(f"Select category: {category}") - page.get_by_text(category, exact=True).click() - - logger.info("Distribute annotation tasks") - page.get_by_text("Distribute annotation tasks").click() - - self.fill_job_and_start(page, jobs_client, job_name) - - def 
run_upload_workflow( - self, - page: Page, - frontend_file_helper: FrontendFileHelper, - num_files: int, - file_tracker, - client, - jobs_client, - dataset_type: str = "none", # "none", "existing", "new" - dataset_name: str = None, - tmp_path=None, - language: str = None, - preprocessor: str = None, - job_name: str = None, - human_in_loop: bool = False, - ): - logger.info("Open wizard") - page.get_by_role("button", name="Upload Wizard").click() - - logger.info(f"Prepare {num_files} temp files") - temp_files = frontend_file_helper.prepare_temp_files(tmp_path, num_files=num_files) - frontend_file_helper.upload_files(page, temp_files, file_tracker=file_tracker, client=client) - - self.select_dataset(page, dataset_type, dataset_name) - - self.select_preprocessor(page, preprocessor=preprocessor, click_next=not language) - - self.select_language(page, language) - - if human_in_loop: - self.select_human_in_the_loop_and_start(page, jobs_client, job_name) - else: - self.fill_job_and_start(page, jobs_client, job_name=job_name) - @pytest.mark.parametrize("num_files", [1, 3]) + @pytest.mark.parametrize("manager", [None, "Airflow", "Databricks"]) def test_upload_documents_without_dataset( - self, logged_in_page: Page, file_tracker, tmp_path, jobs_client, file_client, num_files + self, logged_in_page: Page, file_tracker, tmp_path, jobs_client, file_client, num_files, manager ): page = logged_in_page created_files, client = file_tracker frontend_file_helper = FrontendFileHelper() - self.run_upload_workflow( + run_upload_workflow( page, frontend_file_helper, num_files, file_tracker, client, jobs_client, + pipeline_manager=manager, dataset_type="none", tmp_path=tmp_path, ) @@ -173,7 +44,7 @@ def test_upload_documents_existing_dataset( assert "successfully created" in first_resp["detail"].lower() frontend_file_helper = FrontendFileHelper() - self.run_upload_workflow( + run_upload_workflow( page, frontend_file_helper, num_files, @@ -200,7 +71,7 @@ def 
test_upload_documents_existing_dataset_new_name( assert "successfully created" in first_resp["detail"].lower() frontend_file_helper = FrontendFileHelper() - self.run_upload_workflow( + run_upload_workflow( page, frontend_file_helper, num_files, @@ -224,7 +95,7 @@ def test_upload_documents_new_dataset( created_datasets.append(dataset_name) frontend_file_helper = FrontendFileHelper() - self.run_upload_workflow( + run_upload_workflow( page, frontend_file_helper, num_files, @@ -250,7 +121,7 @@ def test_upload_documents_with_language( created_files, client = file_tracker frontend_file_helper = FrontendFileHelper() - self.run_upload_workflow( + run_upload_workflow( page, frontend_file_helper, num_files, @@ -276,7 +147,7 @@ def test_upload_documents_any_preprocessor( created_files, client = file_tracker frontend_file_helper = FrontendFileHelper() - self.run_upload_workflow( + run_upload_workflow( page, frontend_file_helper, num_files, @@ -302,7 +173,7 @@ def test_upload_documents_all_settings_new_job_name( created_files, client = file_tracker frontend_file_helper = FrontendFileHelper() - self.run_upload_workflow( + run_upload_workflow( page, frontend_file_helper, num_files, @@ -352,7 +223,7 @@ def test_upload_documents_all_settings_existing_job_name( created_files, client = file_tracker frontend_file_helper = FrontendFileHelper() - self.run_upload_workflow( + run_upload_workflow( page, frontend_file_helper, num_files, @@ -391,7 +262,7 @@ def test_human_in_the_loop(self, logged_in_page: Page, num_files, file_tracker, page = logged_in_page created_files, client = file_tracker frontend_file_helper = FrontendFileHelper() - self.run_upload_workflow( + run_upload_workflow( page, frontend_file_helper, num_files, From d211c0a69748f558019661c8e62a8280961c7cf7 Mon Sep 17 00:00:00 2001 From: asobolev Date: Thu, 2 Oct 2025 15:37:17 +0200 Subject: [PATCH 34/37] more jobs tests --- .../helpers/steps/jobs_creation.py | 155 ++++++++++++++++-- .../tests/test_documents.py | 4 - 
test_automation_framework/tests/test_jobs.py | 116 ++++++++++++- 3 files changed, 257 insertions(+), 18 deletions(-) diff --git a/test_automation_framework/helpers/steps/jobs_creation.py b/test_automation_framework/helpers/steps/jobs_creation.py index 3ec601727..b3e83699d 100644 --- a/test_automation_framework/helpers/steps/jobs_creation.py +++ b/test_automation_framework/helpers/steps/jobs_creation.py @@ -44,7 +44,7 @@ def select_language(page: Page, language: str = None): page.get_by_role("button", name="Next").click() -def fill_job_and_start(page: Page, jobs_client, job_name: str, pipeline_manager=None): +def fill_job_and_start(page: Page, jobs_client, job_name: str, pipeline_manager=None, pipeline="print"): job_name = job_name if job_name else f"test_job_{uuid.uuid4().hex[:8]}" logger.info(f"Fill job name: {job_name}") page.get_by_role("textbox", name="Job name").fill(job_name) @@ -55,7 +55,7 @@ def fill_job_and_start(page: Page, jobs_client, job_name: str, pipeline_manager= logger.info("Select pipeline dropdown") page.get_by_role("textbox", name="Select pipeline").click() - page.get_by_text("print", exact=True).click() + page.get_by_text(pipeline, exact=True).click() logger.info("Start extraction") @@ -76,8 +76,10 @@ def select_human_in_the_loop_and_start( validation_type: str = "Cross validation", day: str | None = None, annotator: str = "admin", - categories: list[str] = ["Age"], + categories: list[str] = None, ): + if not categories: + categories = ["Age"] logger.info("Select Human in the loop") page.get_by_role("tab", name="Human in the Loop").click() @@ -118,10 +120,16 @@ def select_files(page: Page, document_names): logger.info(f"Select files: {document_names}") for file_name in document_names: row = page.locator(f"text={file_name}").first + print(f'clicking file "{file_name}"') checkbox_label = row.locator("xpath=preceding-sibling::label") checkbox_label.click(force=True) - page.get_by_role("button", name="Next").click() + +def 
select_first_element(page: Page): + logger.info("Select first element in the table") + first_row = page.locator('div[role="row"]').nth(1) + checkbox = first_row.locator("label.uui-checkbox-container") + checkbox.click(force=True) def run_upload_workflow( @@ -157,26 +165,153 @@ def run_upload_workflow( fill_job_and_start(page, jobs_client, job_name=job_name, pipeline_manager=pipeline_manager) -def run_new_job_workflow( +def create_file_in_dataset( + dataset_tracker, + file_tracker, + tmp_path, + num_files, +): + created_datasets, dataset_client = dataset_tracker + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + dataset = dataset_client.create_dataset(name=dataset_name) + created_datasets.append(dataset_name) + assert "successfully created" in dataset["detail"].lower() + first_dataset_id = dataset_client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}])[ + "data" + ][0]["id"] + + created_files, client = file_tracker + for i in range(num_files): + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + file_id = file_info["id"] + move1 = client.move_files(name=dataset_name, objects=[file_id])[0] + assert move1["status"] is True + assert "successfully bounded" in move1["message"].lower() + files_in_first = dataset_client.search_files(dataset_id=first_dataset_id)["data"] + assert any(f["id"] == file_id for f in files_in_first) + files = [file["file_name"] for file in created_files] + return dataset_name, files + + +def run_new_job_documents_workflow( page: Page, num_files: int, file_tracker, jobs_client, + dataset_tracker, tmp_path=None, job_name: str = None, human_in_loop: bool = False, pipeline_manager: str = None, ): - for i in range(num_files): - created_files, client = file_tracker - file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) - assert file_info["status"] is True - files = [file["file_name"] for file in created_files] + dataset_name, files = create_file_in_dataset( + 
dataset_tracker=dataset_tracker, tmp_path=tmp_path, num_files=num_files, file_tracker=file_tracker + ) logger.info("Open wizard") page.get_by_role("button", name="New job").click() select_files(page, files) + page.get_by_role("button", name="Next").click() + + if human_in_loop: + select_human_in_the_loop_and_start(page, jobs_client, job_name) + else: + if pipeline_manager == "Other": + fill_job_and_start( + page, jobs_client, job_name=job_name, pipeline_manager=pipeline_manager, pipeline="AI by MCP" + ) + else: + fill_job_and_start(page, jobs_client, job_name=job_name, pipeline_manager=pipeline_manager) + + +def run_new_job_first_line_workflow( + page: Page, + num_files: int, + file_tracker, + jobs_client, + dataset_tracker, + tab_button, + tmp_path=None, + job_name: str = None, + human_in_loop: bool = False, + pipeline_manager: str = None, +): + dataset_name, files = create_file_in_dataset( + dataset_tracker=dataset_tracker, tmp_path=tmp_path, num_files=num_files, file_tracker=file_tracker + ) + logger.info("Open wizard") + page.get_by_role("button", name="New job").click() + page.get_by_role("tab", name=tab_button, exact=True).click() + if tab_button == "Datasets": + select_files(page, [dataset_name]) + else: + select_first_element(page) + page.get_by_role("button", name="Next").click() + + if human_in_loop: + select_human_in_the_loop_and_start(page, jobs_client, job_name) + else: + fill_job_and_start(page, jobs_client, job_name=job_name, pipeline_manager=pipeline_manager) + + +def run_new_job_multi_tab_workflow( + page: Page, + jobs_client, + file_tracker, + dataset_tracker, + tmp_path, + tabs: list[str], + num_files=1, + job_name: str = None, + human_in_loop: bool = False, + pipeline_manager: str = None, +): + logger.info(f"Preparing temp document for tabs: {tabs}") + dataset_name, files = create_file_in_dataset( + dataset_tracker=dataset_tracker, tmp_path=tmp_path, num_files=num_files, file_tracker=file_tracker + ) + + logger.info("Open wizard") + 
page.get_by_role("button", name="New job").click() + + for tab in tabs: + logger.info(f"Go to tab: {tab}") + page.get_by_role("tab", name=tab, exact=True).click() + + if tab == "Documents": + select_files(page, files) + else: + select_first_element(page) + + page.get_by_role("button", name="Next").click() + + if human_in_loop: + select_human_in_the_loop_and_start(page, jobs_client, job_name) + else: + fill_job_and_start(page, jobs_client, job_name=job_name, pipeline_manager=pipeline_manager) + + +def run_new_job_dataset_without_documents_workflow( + page: Page, + jobs_client, + dataset_tracker, + job_name: str = None, + human_in_loop: bool = False, + pipeline_manager: str = None, +): + created_datasets, dataset_client = dataset_tracker + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + dataset = dataset_client.create_dataset(name=dataset_name) + created_datasets.append(dataset_name) + assert "successfully created" in dataset["detail"].lower() + + logger.info("Open wizard") + page.get_by_role("button", name="New job").click() + page.get_by_role("tab", name="Datasets", exact=True).click() + + select_files(page, [dataset_name]) + page.get_by_role("button", name="Next").click() if human_in_loop: select_human_in_the_loop_and_start(page, jobs_client, job_name) diff --git a/test_automation_framework/tests/test_documents.py b/test_automation_framework/tests/test_documents.py index 3fefa3e37..da8284514 100644 --- a/test_automation_framework/tests/test_documents.py +++ b/test_automation_framework/tests/test_documents.py @@ -24,7 +24,6 @@ def test_upload_and_delete_file(self, file_tracker, tmp_path): assert file_info["status"] is True assert "id" in file_info assert "file_name" in file_info - created_files.append(file_info) search = client.search_files() ids = [f["id"] for f in search["data"]] assert file_info["id"] in ids @@ -80,7 +79,6 @@ def test_move_file(self, file_tracker, dataset_tracker, tmp_path): created_files, client = file_tracker file_info, temp_file = 
client.upload_temp_file(client, file_tracker, tmp_path) - created_files.append(file_info) file_id = file_info["id"] try: move1 = client.move_files(name=first_dataset_name, objects=[file_id])[0] @@ -109,7 +107,6 @@ def test_add_file_to_dataset_twice(self, file_tracker, dataset_tracker, tmp_path created_files, client = file_tracker file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) - created_files.append(file_info) file_id = file_info["id"] try: move1 = client.move_files(name=dataset_name, objects=[file_id])[0] @@ -429,7 +426,6 @@ def test_delete_files(self, logged_in_page: Page, file_tracker, tmp_path, num_fi file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) assert file_info["status"] is True uploaded_files.append(file_info) - created_files.append(file_info) temp_files.append(temp_file) page.reload() diff --git a/test_automation_framework/tests/test_jobs.py b/test_automation_framework/tests/test_jobs.py index 8e486a132..78c50398b 100644 --- a/test_automation_framework/tests/test_jobs.py +++ b/test_automation_framework/tests/test_jobs.py @@ -2,7 +2,13 @@ from datetime import datetime, timedelta import uuid from playwright.sync_api import Page, expect -from helpers.steps.jobs_creation import run_new_job_workflow +from helpers.steps.jobs_creation import ( + run_new_job_documents_workflow, + run_new_job_first_line_workflow, + run_new_job_multi_tab_workflow, + run_new_job_dataset_without_documents_workflow, +) +from helpers.base_client.base_client import HTTPError import pytest @@ -121,6 +127,36 @@ def test_jobs_date_range_filter(self, jobs_client, field): date_val = datetime.fromisoformat(job[field]) assert datetime.fromisoformat(start) <= date_val <= datetime.fromisoformat(end) + def test_create_job_with_existing_name( + self, file_client, jobs_client, file_tracker, dataset_tracker, job_tracker, tmp_path, user_uuid + ): + created_files, client = file_tracker + file_info, temp_file = 
client.upload_temp_file(client, file_tracker, tmp_path) + created_datasets, dataset_client = dataset_tracker + + dataset_name = f"autotest_ds_{uuid.uuid4().hex[:8]}" + dataset_client.create_dataset(name=dataset_name) + created_datasets.append(dataset_name) + move_resp = file_client.move_files(name=dataset_name, objects=[file_info["id"]])[0] + assert move_resp["status"] is True + job_name = f"test_job_{uuid.uuid4().hex[:8]}" + create_resp_first = jobs_client.create_job( + name=job_name, + file_ids=[file_info["id"]], + owners=[user_uuid], + ) + job_tracker[0].append(create_resp_first) + job_id = create_resp_first.get("id") + assert job_id + + with pytest.raises(HTTPError) as exc: + jobs_client.create_job( + name=job_name, + file_ids=[file_info["id"]], + owners=[user_uuid], + ) + assert exc.value.status_code == 400 + class TestJobsFrontend: def test_jobs_scroll(self, jobs_page: Page): @@ -219,14 +255,86 @@ def test_jobs_show_on_page(self, jobs_page: Page): assert count <= int(value), f"Expected at most {value} rows, got {count}" @pytest.mark.parametrize("num_files", [1, 3]) - @pytest.mark.parametrize("manager", [None, "Airflow", "Databricks"]) - def test_create_job(self, jobs_page: Page, file_tracker, tmp_path, jobs_client, file_client, num_files, manager): + @pytest.mark.parametrize("manager", ["Airflow", "Databricks", "Other"]) + def test_create_job_documents_tab( + self, jobs_page: Page, file_tracker, tmp_path, jobs_client, file_client, dataset_tracker, num_files, manager + ): page = jobs_page - run_new_job_workflow( + run_new_job_documents_workflow( page=page, num_files=num_files, file_tracker=file_tracker, + dataset_tracker=dataset_tracker, + jobs_client=jobs_client, + tmp_path=tmp_path, + pipeline_manager=manager, + ) + + @pytest.mark.parametrize("manager", ["Airflow", "Databricks"]) + @pytest.mark.parametrize("tab", ["Jobs", "Datasets", "Revisions"]) + def test_create_job_other_tabs( + self, jobs_page: Page, file_tracker, tmp_path, jobs_client, file_client, 
tab, manager, dataset_tracker + ): + page = jobs_page + run_new_job_first_line_workflow( + page=page, + num_files=1, + file_tracker=file_tracker, + dataset_tracker=dataset_tracker, + jobs_client=jobs_client, + tmp_path=tmp_path, + pipeline_manager=manager, + tab_button=tab, + ) + + @pytest.mark.parametrize("manager", ["Airflow", "Databricks"]) + @pytest.mark.parametrize( + "tabs", + [ + ["Documents", "Jobs"], + ["Documents", "Datasets"], + ["Documents", "Revisions"], + ["Documents", "Jobs", "Datasets", "Revisions"], + ], + ) + def test_create_job_multi_tabs( + self, jobs_page: Page, file_tracker, tmp_path, jobs_client, dataset_tracker, manager, tabs + ): + page = jobs_page + run_new_job_multi_tab_workflow( + page=page, jobs_client=jobs_client, + tabs=tabs, + file_tracker=file_tracker, + dataset_tracker=dataset_tracker, tmp_path=tmp_path, pipeline_manager=manager, ) + + def test_create_job_zero_dataset( + self, + jobs_page: Page, + file_tracker, + tmp_path, + jobs_client, + dataset_tracker, + ): + # outcome? 
+ page = jobs_page + run_new_job_dataset_without_documents_workflow( + page=page, + jobs_client=jobs_client, + dataset_tracker=dataset_tracker, + ) + + def test_create_job_without_name( + self, + jobs_page: Page, + ): + page = jobs_page + logger.info("Open wizard") + page.get_by_role("button", name="New job").click() + page.get_by_role("button", name="Next").click() + page.get_by_role("button", name="New Job").click() + error_label = page.locator("div[role='alert'].uui-invalid-message").nth(0) + expect(error_label).to_have_text("The field is mandatory", timeout=5000) From ed29ece108f9becc9f51a85d53659c7f7dee0075 Mon Sep 17 00:00:00 2001 From: asobolev Date: Fri, 3 Oct 2025 16:58:09 +0200 Subject: [PATCH 35/37] added more jobs creation tests + human in the loop --- .../helpers/steps/jobs_creation.py | 40 +++++++++++----- test_automation_framework/tests/test_jobs.py | 48 +++++++++++++++++++ 2 files changed, 77 insertions(+), 11 deletions(-) diff --git a/test_automation_framework/helpers/steps/jobs_creation.py b/test_automation_framework/helpers/steps/jobs_creation.py index b3e83699d..1af850316 100644 --- a/test_automation_framework/helpers/steps/jobs_creation.py +++ b/test_automation_framework/helpers/steps/jobs_creation.py @@ -44,7 +44,9 @@ def select_language(page: Page, language: str = None): page.get_by_role("button", name="Next").click() -def fill_job_and_start(page: Page, jobs_client, job_name: str, pipeline_manager=None, pipeline="print"): +def fill_job_and_start( + page: Page, jobs_client, job_name: str, pipeline_manager=None, pipeline="print", save_as_draft=False +): job_name = job_name if job_name else f"test_job_{uuid.uuid4().hex[:8]}" logger.info(f"Fill job name: {job_name}") page.get_by_role("textbox", name="Job name").fill(job_name) @@ -57,16 +59,23 @@ def fill_job_and_start(page: Page, jobs_client, job_name: str, pipeline_manager= page.get_by_role("textbox", name="Select pipeline").click() page.get_by_text(pipeline, exact=True).click() - 
logger.info("Start extraction") - - page.get_by_role("button", name="Start Extraction").click() + if save_as_draft: + logger.info("Save as draft") + page.get_by_role("button", name="Save as Draft").click() + else: + logger.info("Start extraction") + page.get_by_role("button", name="Start Extraction").click() page.wait_for_url("**/jobs/**", timeout=20000) jobs = jobs_client.search_jobs() job_id = next((j["id"] for j in jobs["data"] if j["name"] == job_name), None) assert job_id, f"Job with name {job_name} not found!" - jobs_client.poll_until_finished(job_id, timeout_seconds=180) - page.reload() - expect(page.get_by_text("Finished")).to_be_visible(timeout=10000) + if not save_as_draft: + jobs_client.poll_until_finished(job_id, timeout_seconds=180) + page.reload() + expect(page.get_by_text("Finished")).to_be_visible(timeout=10000) + else: + page.reload() + expect(page.get_by_text("Draft")).to_be_visible(timeout=10000) def select_human_in_the_loop_and_start( @@ -77,6 +86,7 @@ def select_human_in_the_loop_and_start( day: str | None = None, annotator: str = "admin", categories: list[str] = None, + distribute_tasks: bool = False, ): if not categories: categories = ["Age"] @@ -103,8 +113,9 @@ def select_human_in_the_loop_and_start( logger.info(f"Select category: {category}") page.get_by_text(category, exact=True).click() - logger.info("Distribute annotation tasks") - page.get_by_text("Distribute annotation tasks").click() + if distribute_tasks: + logger.info("Distribute annotation tasks") + page.get_by_text("Distribute annotation tasks").click() fill_job_and_start(page, jobs_client, job_name) @@ -202,7 +213,9 @@ def run_new_job_documents_workflow( tmp_path=None, job_name: str = None, human_in_loop: bool = False, + validation_type="Cross validation", pipeline_manager: str = None, + distribute_tasks: bool = False, ): dataset_name, files = create_file_in_dataset( dataset_tracker=dataset_tracker, tmp_path=tmp_path, num_files=num_files, file_tracker=file_tracker @@ -215,7 
+228,9 @@ def run_new_job_documents_workflow( page.get_by_role("button", name="Next").click() if human_in_loop: - select_human_in_the_loop_and_start(page, jobs_client, job_name) + select_human_in_the_loop_and_start( + page, jobs_client, job_name, validation_type=validation_type, distribute_tasks=distribute_tasks + ) else: if pipeline_manager == "Other": fill_job_and_start( @@ -236,6 +251,7 @@ def run_new_job_first_line_workflow( job_name: str = None, human_in_loop: bool = False, pipeline_manager: str = None, + save_as_draft: bool = False, ): dataset_name, files = create_file_in_dataset( dataset_tracker=dataset_tracker, tmp_path=tmp_path, num_files=num_files, file_tracker=file_tracker @@ -252,7 +268,9 @@ def run_new_job_first_line_workflow( if human_in_loop: select_human_in_the_loop_and_start(page, jobs_client, job_name) else: - fill_job_and_start(page, jobs_client, job_name=job_name, pipeline_manager=pipeline_manager) + fill_job_and_start( + page, jobs_client, job_name=job_name, pipeline_manager=pipeline_manager, save_as_draft=save_as_draft + ) def run_new_job_multi_tab_workflow( diff --git a/test_automation_framework/tests/test_jobs.py b/test_automation_framework/tests/test_jobs.py index 78c50398b..f9ddea5f3 100644 --- a/test_automation_framework/tests/test_jobs.py +++ b/test_automation_framework/tests/test_jobs.py @@ -338,3 +338,51 @@ def test_create_job_without_name( page.get_by_role("button", name="New Job").click() error_label = page.locator("div[role='alert'].uui-invalid-message").nth(0) expect(error_label).to_have_text("The field is mandatory", timeout=5000) + + def test_create_job_save_draft( + self, jobs_page: Page, file_tracker, tmp_path, jobs_client, file_client, dataset_tracker + ): + page = jobs_page + run_new_job_first_line_workflow( + page=page, + num_files=1, + file_tracker=file_tracker, + dataset_tracker=dataset_tracker, + jobs_client=jobs_client, + tmp_path=tmp_path, + tab_button="Jobs", + save_as_draft=True, + ) + + @pytest.mark.parametrize( + 
"validation_type", ["Cross validation", "Extensive validation", "Hierarchical validation", "Validation only"] + ) + def test_create_job_human_in_the_loop( + self, jobs_page: Page, file_tracker, tmp_path, jobs_client, dataset_tracker, validation_type + ): + page = jobs_page + run_new_job_documents_workflow( + page=page, + num_files=1, + jobs_client=jobs_client, + file_tracker=file_tracker, + dataset_tracker=dataset_tracker, + tmp_path=tmp_path, + human_in_loop=True, + validation_type=validation_type, + ) + + def test_create_job_human_in_the_loop_distribute( + self, jobs_page: Page, file_tracker, tmp_path, jobs_client, dataset_tracker + ): + page = jobs_page + run_new_job_documents_workflow( + page=page, + num_files=1, + jobs_client=jobs_client, + file_tracker=file_tracker, + dataset_tracker=dataset_tracker, + tmp_path=tmp_path, + human_in_loop=True, + distribute_tasks=True, + ) From 4d65f984105eda6c679999b1fb535da7e3bbfdf4 Mon Sep 17 00:00:00 2001 From: asobolev Date: Tue, 7 Oct 2025 10:01:44 +0200 Subject: [PATCH 36/37] added some more jobs tests --- test_automation_framework/tests/test_jobs.py | 63 ++++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/test_automation_framework/tests/test_jobs.py b/test_automation_framework/tests/test_jobs.py index f9ddea5f3..8c4b71b62 100644 --- a/test_automation_framework/tests/test_jobs.py +++ b/test_automation_framework/tests/test_jobs.py @@ -11,6 +11,7 @@ from helpers.base_client.base_client import HTTPError import pytest +import re logger = getLogger(__name__) @@ -386,3 +387,65 @@ def test_create_job_human_in_the_loop_distribute( human_in_loop=True, distribute_tasks=True, ) + + def test_open_any_job_from_table(self, jobs_page: Page): + page = jobs_page + rows = page.locator("div[role='row']").filter(has_not=page.locator("div[role='columnheader']")) + expect(rows.first).to_be_visible(timeout=10000) + first_job = rows.first.locator("div").nth(1) + job_name = first_job.text_content().strip() + first_job.click() + 
expect(page).to_have_url(re.compile(r".*/jobs/.*"), timeout=10000) + expect(page.get_by_text(job_name)).to_be_visible(timeout=10000) + + def test_open_job_panel_load_bar(self, jobs_page: Page): + page = jobs_page + + rows = page.locator("div[role='row']").filter(has_not=page.locator("div[role='columnheader']")) + expect(rows.first).to_be_visible(timeout=10000) + rows.first.click() + + sidebar = page.locator("div[class*='job-page_job-page-sidebar-content']") + expect(sidebar).to_be_visible(timeout=10000) + + progress_text = sidebar.locator("p[class*='job-sidebar-header_progressBarText']") + expect(progress_text).to_be_visible(timeout=10000) + + progress_bar = sidebar.locator("div[class*='job-sidebar-header_bar']") + count = progress_bar.count() + assert count > 0, "Progress bar element not found in DOM" + + def test_open_job_panel_hide_unhide(self, jobs_page: Page): + page = jobs_page + + rows = page.locator("div[role='row']").filter(has_not=page.locator("div[role='columnheader']")) + expect(rows.first).to_be_visible(timeout=10000) + rows.first.click() + + sidebar = page.locator("div[class*='job-page_job-page-sidebar-content']") + expect(sidebar).to_be_visible(timeout=10000) + + panel_title = sidebar.locator("h2") + expect(panel_title).to_have_text("Automatic", timeout=10000) + + panel_wrapper = sidebar.locator("div[class*='jod-detailed-sidebar-connector_sidebar-panel-wrapper']") + toggle_button = sidebar.locator("button[class*='jod-detailed-sidebar-connector_close-icon']") + expect(toggle_button).to_be_visible(timeout=5000) + + initial_classes = panel_wrapper.first.get_attribute("class") or "" + assert "sidebar-panel-opened" in initial_classes, f"Expected opened class, got: {initial_classes}" + + toggle_button.click() + + page.wait_for_timeout(300) + closed_classes = panel_wrapper.first.get_attribute("class") or "" + assert "sidebar-panel-closed" in closed_classes, f"Expected closed class, got: {closed_classes}" + + open_icon_button = 
sidebar.locator("button[class*='jod-detailed-sidebar-connector_open-icon']") + expect(open_icon_button).to_be_visible(timeout=5000) + + open_icon_button.click() + + page.wait_for_timeout(300) + reopened_classes = panel_wrapper.first.get_attribute("class") or "" + assert "sidebar-panel-opened" in reopened_classes, f"Expected reopened class, got: {reopened_classes}" From 385f493cd2136be93bb6b7c3e171cce6b5c6b7f4 Mon Sep 17 00:00:00 2001 From: asobolev Date: Tue, 21 Oct 2025 13:18:53 +0200 Subject: [PATCH 37/37] added categories and tasks frontend tests --- test_automation_framework/conftest.py | 18 +++ .../tests/test_categories.py | 99 ++++++++++++++++ test_automation_framework/tests/test_jobs.py | 2 +- test_automation_framework/tests/test_tasks.py | 110 ++++++++++++++++++ 4 files changed, 228 insertions(+), 1 deletion(-) create mode 100644 test_automation_framework/tests/test_tasks.py diff --git a/test_automation_framework/conftest.py b/test_automation_framework/conftest.py index f90f89d2b..0547eae81 100644 --- a/test_automation_framework/conftest.py +++ b/test_automation_framework/conftest.py @@ -181,3 +181,21 @@ def jobs_page(logged_in_page, settings) -> Page: rows = page.locator("div[role='row']").locator("xpath=..").locator("div[role='row']:not(.uui-table-header-row)") expect(rows.first).to_be_visible(timeout=5000) return page + + +@pytest.fixture +def categories_page(logged_in_page, settings) -> Page: + page = logged_in_page + page.goto(f"{settings.BASE_URL}:8083/categories") + rows = page.locator("div[role='row']").locator("xpath=..").locator("div[role='row']:not(.uui-table-header-row)") + expect(rows.first).to_be_visible(timeout=5000) + return page + + +@pytest.fixture +def tasks_page(logged_in_page, settings) -> Page: + page = logged_in_page + page.goto(f"{settings.BASE_URL}:8083/my tasks") + rows = page.locator("div[role='row']").locator("xpath=..").locator("div[role='row']:not(.uui-table-header-row)") + expect(rows.first).to_be_visible(timeout=5000) + 
class TestCategoriesFrontend:
    """UI tests for the Categories table: scrolling, pagination, page-size selector."""

    def test_categories_scroll(self, categories_page: Page):
        """With the largest page size selected, both ends of the table are reachable by scrolling."""
        page = categories_page

        # Switch the page size to 100 so the table is long enough to scroll.
        page_size_container = page.locator("div:has(> div > span:has-text('Show on page'))")
        page_size_input = page_size_container.locator("input[aria-haspopup='true']")
        page_size_input.click()
        page.locator("div[role='option']", has_text="100").click()

        rows = page.locator("div[role='row']").filter(has_not=page.locator("div[role='columnheader']"))

        last_row = rows.last
        last_row.scroll_into_view_if_needed()
        expect(last_row).to_be_visible()

        first_row = rows.first
        first_row.scroll_into_view_if_needed()
        expect(first_row).to_be_visible()

    def test_categories_pagination_by_page_number(self, categories_page: Page):
        """Clicking page '2' either marks it current or changes the visible rows."""
        page = categories_page
        nav = page.locator('nav[role="navigation"]')
        nav.wait_for(state="visible", timeout=10000)

        rows = page.locator("div[role='row']").filter(has_not=page.locator("div[role='columnheader']"))
        first_row = rows.first
        expect(first_row).to_be_visible(timeout=10000)

        old_text = first_row.text_content()

        nav.get_by_role("button", name="2", exact=True).click()

        # Some builds do not expose aria-current on the active page button, so
        # fall back to asserting that the table contents changed.
        try:
            # exact=True: a bare name="2" also matches "20", "21", ... once the
            # pager grows, which trips Playwright's strict mode.
            expect(nav.get_by_role("button", name="2", exact=True)).to_have_attribute(
                "aria-current", "true", timeout=10000
            )
        except AssertionError:
            expect(rows.first).not_to_have_text(old_text, timeout=10000)

        active_attr = nav.get_by_role("button", name="2", exact=True).get_attribute("aria-current")
        assert active_attr == "true" or rows.first.text_content() != old_text

    def test_categories_pagination_by_arrows(self, categories_page: Page):
        """The next/previous arrow buttons move forward to page 2 and back to page 1."""
        page = categories_page

        nav = page.locator('nav[role="navigation"]')
        nav.wait_for(state="visible", timeout=10000)

        rows = page.locator("div[role='row']").filter(has_not=page.locator("div[role='columnheader']"))
        first_row = rows.first
        expect(first_row).to_be_visible(timeout=10000)

        old_text = first_row.text_content()

        # The last button inside the pager nav is the "next page" arrow.
        nav.locator("button").last.click()
        try:
            expect(nav.get_by_role("button", name="2", exact=True)).to_have_attribute(
                "aria-current", "true", timeout=10000
            )
        except AssertionError:
            expect(rows.first).not_to_have_text(old_text, timeout=10000)

        # The first button is the "previous page" arrow.
        old_text_back = rows.first.text_content()
        nav.locator("button").first.click()
        try:
            expect(nav.get_by_role("button", name="1", exact=True)).to_have_attribute(
                "aria-current", "true", timeout=10000
            )
        except AssertionError:
            expect(rows.first).not_to_have_text(old_text_back, timeout=10000)

        active_attr_1 = nav.get_by_role("button", name="1", exact=True).get_attribute("aria-current")
        assert active_attr_1 == "true" or rows.first.text_content() != old_text_back

    def test_categories_show_on_page(self, categories_page: Page):
        """Every 'Show on page' option limits the rendered row count to its value."""
        page = categories_page

        rows = page.locator("div[role='row']").locator("xpath=..").locator("div[role='row']:not(.uui-table-header-row)")

        page_size_container = page.locator("div:has(> div > span:has-text('Show on page'))")
        page_size_input = page_size_container.locator("input[aria-haspopup='true']")

        # Open the dropdown once to collect the available option labels, then
        # close it again before iterating.
        page_size_input.click()
        options = page.locator("div[role='option']")
        option_texts = [options.nth(i).inner_text() for i in range(options.count())]
        page_size_input.click()

        for value in option_texts:
            page_size_input.click()

            option = page.locator("div[role='option']", has_text=value).first
            option.wait_for(state="visible", timeout=5000)
            option.click()

            expect(rows.first).to_be_visible(timeout=10000)
            # Wait for the table to settle at the new page size before counting;
            # counting right after the first row appears is racy (mirrors the
            # equivalent wait in test_tasks.py).
            page.wait_for_function(
                """(expected) => {
                    const rows = document.querySelectorAll("div[role='row']:not(.uui-table-header-row)");
                    return rows.length <= expected;
                }""",
                arg=int(value),
                timeout=5000,
            )
            count = rows.count()
            assert count <= int(value), f"Expected at most {value} rows, got {count}"


from logging import getLogger
from playwright.sync_api import Page, expect


logger = getLogger(__name__)


class TestTasksFrontend:
    """UI tests for the My Tasks table (mirrors the Categories suite)."""

    def test_tasks_scroll(self, tasks_page: Page):
        """With the largest page size selected, both ends of the table are reachable by scrolling."""
        page = tasks_page

        page_size_container = page.locator("div:has(> div > span:has-text('Show on page'))")
        page_size_input = page_size_container.locator("input[aria-haspopup='true']")
        page_size_input.click()
        page.locator("div[role='option']", has_text="100").click()

        rows = page.locator("div[role='row']").filter(has_not=page.locator("div[role='columnheader']"))

        last_row = rows.last
        last_row.scroll_into_view_if_needed()
        expect(last_row).to_be_visible()

        first_row = rows.first
        first_row.scroll_into_view_if_needed()
        expect(first_row).to_be_visible()
test_tasks_pagination_by_page_number(self, tasks_page: Page): + page = tasks_page + nav = page.locator('nav[role="navigation"]') + nav.wait_for(state="visible", timeout=10000) + + rows = page.locator("div[role='row']").filter(has_not=page.locator("div[role='columnheader']")) + first_row = rows.first + expect(first_row).to_be_visible(timeout=10000) + + old_text = first_row.text_content() + + nav.get_by_role("button", name="2", exact=True).click() + + try: + expect(nav.get_by_role("button", name="2")).to_have_attribute("aria-current", "true", timeout=10000) + except AssertionError: + expect(rows.first).not_to_have_text(old_text, timeout=10000) + + active_attr = nav.get_by_role("button", name="2").get_attribute("aria-current") + assert active_attr == "true" or rows.first.text_content() != old_text + + def test_tasks_pagination_by_arrows(self, tasks_page: Page): + page = tasks_page + + nav = page.locator('nav[role="navigation"]') + nav.wait_for(state="visible", timeout=10000) + + rows = page.locator("div[role='row']").filter(has_not=page.locator("div[role='columnheader']")) + first_row = rows.first + expect(first_row).to_be_visible(timeout=10000) + + old_text = first_row.text_content() + + nav.locator("button").last.click() + try: + expect(nav.get_by_role("button", name="2", exact=True)).to_have_attribute( + "aria-current", "true", timeout=10000 + ) + except AssertionError: + expect(rows.first).not_to_have_text(old_text, timeout=10000) + + old_text_back = rows.first.text_content() + nav.locator("button").first.click() + try: + expect(nav.get_by_role("button", name="1", exact=True)).to_have_attribute( + "aria-current", "true", timeout=10000 + ) + except AssertionError: + expect(rows.first).not_to_have_text(old_text_back, timeout=10000) + + active_attr_1 = nav.get_by_role("button", name="1", exact=True).get_attribute("aria-current") + assert active_attr_1 == "true" or rows.first.text_content() != old_text_back + + def test_tasks_show_on_page(self, tasks_page: Page): + 
page = tasks_page + + rows = page.locator("div[role='row']").locator("xpath=..").locator("div[role='row']:not(.uui-table-header-row)") + + page_size_container = page.locator("div:has(> div > span:has-text('Show on page'))") + page_size_input = page_size_container.locator("input[aria-haspopup='true']") + + page_size_input.click() + options = page.locator("div[role='option']") + option_texts = [options.nth(i).inner_text() for i in range(options.count())] + page_size_input.click() + + for value in option_texts: + page_size_input.click() + + option = page.locator("div[role='option']", has_text=value).first + option.wait_for(state="visible", timeout=5000) + option.click() + expect(rows.first).to_be_visible(timeout=10000) + page.wait_for_timeout(1000) + page.wait_for_function( + """(expected) => { + const rows = document.querySelectorAll("div[role='row']:not(.uui-table-header-row)"); + return rows.length <= expected; + }""", + arg=int(value), + timeout=5000, + ) + count = rows.count() + assert count <= int(value), f"Expected at most {value} rows, got {count}"