diff --git a/test_automation_framework/.flake8 b/test_automation_framework/.flake8 new file mode 100644 index 000000000..37c03609e --- /dev/null +++ b/test_automation_framework/.flake8 @@ -0,0 +1,4 @@ +[flake8] +max-line-length = 120 +ignore = E203, W503 +exclude = .git,__pycache__,build,dist diff --git a/test_automation_framework/.gitignore b/test_automation_framework/.gitignore new file mode 100644 index 000000000..2d08c15ef --- /dev/null +++ b/test_automation_framework/.gitignore @@ -0,0 +1,5 @@ +.env +__pycache__/ +*.pyc +.vscode/ +.idea/ \ No newline at end of file diff --git a/test_automation_framework/.pre-commit-config.yaml b/test_automation_framework/.pre-commit-config.yaml new file mode 100644 index 000000000..f1d963b4e --- /dev/null +++ b/test_automation_framework/.pre-commit-config.yaml @@ -0,0 +1,14 @@ +repos: + - repo: https://github.com/psf/black + rev: 25.1.0 + hooks: + - id: black + args: [--line-length=120] + language_version: python3 + + - repo: https://github.com/pycqa/flake8 + rev: 7.3.0 + hooks: + - id: flake8 + args: [--config=test_automation_framework/.flake8] + additional_dependencies: [] diff --git a/test_automation_framework/README.md b/test_automation_framework/README.md new file mode 100644 index 000000000..b8d16ec11 --- /dev/null +++ b/test_automation_framework/README.md @@ -0,0 +1,47 @@ +# BadgerDoc Test Automation Framework + +This project is a Python-based **test automation framework** built with [pytest](https://docs.pytest.org/). + +## Getting Started + +### 1. Install PDM +Make sure you have [PDM](https://pdm-project.org/latest/#installation) installed: + +```bash +brew install pdm # macOS +# or +pip install pdm +``` + +Verify installation: + +```bash +pdm --version +``` + +### 2. Clone the repository + +```bash +git clone https://github.com/epam/badgerdoc.git +cd badgerdoc +``` + +### 3. Install dependencies + +```bash +pdm install +``` + +### 4. Pre-commit hooks + +Enable pre-commit to enforce style and linting: +```bash +pre-commit install +``` +Now hooks will run automatically before each commit. + +### 5. 
Run tests + +```bash +pdm run pytest +``` \ No newline at end of file diff --git a/test_automation_framework/config/defaults.yaml b/test_automation_framework/config/defaults.yaml new file mode 100644 index 000000000..17d33a05a --- /dev/null +++ b/test_automation_framework/config/defaults.yaml @@ -0,0 +1,8 @@ +BASE_URL: "http://demo.badgerdoc.com" +BASE_PORT: 8080 +TIMEOUT_SECONDS: 30 +MAX_WORKERS: 4 +USE_MOCK_LLM: true +LOG_LEVEL: "INFO" +API_USER: "user@example.com" +API_PASS: "changeme" diff --git a/test_automation_framework/conftest.py b/test_automation_framework/conftest.py new file mode 100644 index 000000000..0547eae81 --- /dev/null +++ b/test_automation_framework/conftest.py @@ -0,0 +1,201 @@ +import logging +from logging import getLogger +from typing import Tuple +from playwright.sync_api import expect + + +import pytest + +from settings import load_settings +from helpers.auth.auth_client import AuthClient +from helpers.base_client.base_client import BaseClient +from helpers.datasets.dataset_client import DatasetClient +from helpers.files.file_client import FileClient +from helpers.jobs.jobs_client import JobsClient +from helpers.menu.menu_client import MenuClient +from helpers.category.categories import CategoriesClient +from helpers.users.users import UsersClient +from helpers.reports.reports_client import ReportsClient +from helpers.plugins.plugins_client import PluginsClient + +from playwright.sync_api import Page + +logger = getLogger(__name__) + + +def pytest_configure(): + logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(name)s: %(message)s") + + +@pytest.fixture(scope="session") +def settings(): + return load_settings() + + +@pytest.fixture(scope="session") +def tenant(settings) -> str: + return getattr(settings, "TENANT", "demo-badgerdoc") + + +@pytest.fixture(scope="session") +def base_client(settings) -> BaseClient: + client = BaseClient(f"{settings.BASE_URL}:{settings.BASE_PORT}", timeout=10) + yield client + client.close() + + +@pytest.fixture(scope="session") +def auth_service(base_client) -> AuthClient: + return AuthClient(base_client) + + +@pytest.fixture(scope="session") +def auth_token(auth_service, settings) -> Tuple[str, str]: + return auth_service.get_token(settings.API_USER, settings.API_PASS.get_secret_value()) + + +@pytest.fixture +def access_token(auth_token) -> str: + return auth_token[0] + + +@pytest.fixture +def menu_client(settings, access_token, tenant) -> MenuClient: + return MenuClient(f"{settings.BASE_URL}:{settings.BASE_PORT}", access_token, tenant) + + +@pytest.fixture +def dataset_client(settings, access_token, tenant) -> DatasetClient: + return DatasetClient(f"{settings.BASE_URL}:{settings.BASE_PORT}", access_token, tenant) + + +@pytest.fixture +def file_client(settings, access_token, tenant) -> FileClient: + return FileClient(f"{settings.BASE_URL}:{settings.BASE_PORT}", access_token, tenant) + + +@pytest.fixture +def jobs_client(settings, access_token, tenant) -> JobsClient: + return JobsClient(f"{settings.BASE_URL}:{settings.BASE_PORT}", access_token, tenant) + + +@pytest.fixture +def reports_client(settings, access_token, tenant) -> ReportsClient: + return ReportsClient(f"{settings.BASE_URL}:{settings.BASE_PORT}", access_token, tenant) + + +@pytest.fixture +def plugins_client(settings, access_token, tenant) -> PluginsClient: + return PluginsClient(f"{settings.BASE_URL}:{settings.BASE_PORT}", access_token, tenant) + + +@pytest.fixture +def user_uuid(settings, access_token, tenant) -> str: + users_client = 
UsersClient(f"{settings.BASE_URL}:{settings.BASE_PORT}", access_token, tenant) + users = users_client.search_users() + return next((u.id for u in users if u.username == "admin"), None) + + +@pytest.fixture +def categories_client(settings, access_token, tenant) -> CategoriesClient: + return CategoriesClient(f"{settings.BASE_URL}:{settings.BASE_PORT}", access_token, tenant) + + +@pytest.fixture +def dataset_tracker(dataset_client): + created: list[str] = [] + yield created, dataset_client + for name in created: + try: + resp = dataset_client.delete_dataset(name=name) + logger.info(f"[dataset_tracker] Deleted dataset {name}: {resp.get('detail')}") + except Exception as e: + logger.warning(f"[dataset_tracker] Failed to delete dataset {name}: {e}") + + +@pytest.fixture +def file_tracker(file_client): + created_files: list[dict] = [] + yield created_files, file_client + if created_files: + ids = [f["id"] for f in created_files if f.get("id") is not None] + if ids: + try: + result = file_client.delete_files(ids) + logger.info(f"[file_tracker] Deleted files: {ids}, response={result}") + except Exception as e: + logger.warning(f"[file_tracker] Failed to cleanup files {ids}: {e}") + + +@pytest.fixture +def job_tracker(jobs_client): + created: list[dict] = [] + yield created, jobs_client + for job in created: + job_id = job.get("id") or job.get("job_id") or (job.get("job") or {}).get("id") + if not job_id: + continue + try: + jobs_client.post("/jobs/jobs/cancel", json={"id": job_id}, headers=jobs_client._default_headers()) + logger.info(f"[job_tracker] Cancelled job {job_id}") + except Exception as e: + logger.warning(f"[job_tracker] Could not cancel job {job_id}: {e}") + + +@pytest.fixture +def plugins_tracker(plugins_client): + created: list[int] = [] + yield created, plugins_client + for id in created: + try: + plugins_client.delete_plugin(plugin_id=id) + logger.info(f"[plugins_tracker] Deleted plugin {id}") + except Exception as e: + logger.warning(f"[plugins_tracker] Failed to delete plugin {id}: {e}") + + +@pytest.fixture +def logged_in_page(page: Page, settings) -> Page: + page.goto(f"{settings.BASE_URL}:8083/login", timeout=180000) + page.get_by_role("textbox", name="Username").fill("admin") + page.get_by_role("textbox", name="Password").fill("admin") + page.get_by_role("button", name="Login", exact=True).click() + items = page.locator("a[class^='document-card-view-item_card-item']") + expect(items.first).to_be_visible(timeout=100000) + return page + + +@pytest.fixture +def plugins_page(logged_in_page, settings) -> Page: + page = logged_in_page + page.goto(f"{settings.BASE_URL}:8083/settings/plugins") + row_cells = page.locator("div[role='row'] div[role='cell']:first-child div div") + expect(row_cells.first).to_be_visible(timeout=100000) + return page + + +@pytest.fixture +def jobs_page(logged_in_page, settings) -> Page: + page = logged_in_page + page.goto(f"{settings.BASE_URL}:8083/jobs") + rows = page.locator("div[role='row']").locator("xpath=..").locator("div[role='row']:not(.uui-table-header-row)") + expect(rows.first).to_be_visible(timeout=5000) + return page + + +@pytest.fixture +def categories_page(logged_in_page, settings) -> Page: + page = logged_in_page + page.goto(f"{settings.BASE_URL}:8083/categories") + rows = page.locator("div[role='row']").locator("xpath=..").locator("div[role='row']:not(.uui-table-header-row)") + expect(rows.first).to_be_visible(timeout=5000) + return page + + +@pytest.fixture +def tasks_page(logged_in_page, settings) -> Page: + page = logged_in_page + 
page.goto(f"{settings.BASE_URL}:8083/my tasks") + rows = page.locator("div[role='row']").locator("xpath=..").locator("div[role='row']:not(.uui-table-header-row)") + expect(rows.first).to_be_visible(timeout=5000) + return page diff --git a/test_automation_framework/helpers/auth/auth_client.py b/test_automation_framework/helpers/auth/auth_client.py new file mode 100644 index 000000000..564f841bb --- /dev/null +++ b/test_automation_framework/helpers/auth/auth_client.py @@ -0,0 +1,46 @@ +from __future__ import annotations +from typing import Optional +from pydantic import BaseModel + +from helpers.base_client.base_client import BaseClient + + +class TokenResponse(BaseModel): + access_token: str + refresh_token: str + id_token: Optional[str] = None + scope: Optional[str] = None + session_state: Optional[str] = None + token_type: Optional[str] = None + expires_in: Optional[int] = None + + +class AuthClient: + def __init__(self, client: BaseClient) -> None: + self.client = client + + def get_token(self, username: str, password: str, client_id: str = "admin-cli") -> tuple[str, str]: + resp = self.client.post_json( + "/users/token", + data={ + "grant_type": "password", + "username": username, + "password": password, + "client_id": client_id, + }, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + ) + result = TokenResponse.model_validate(resp) + return result.access_token, result.refresh_token + + def refresh_token(self, refresh_token: str, client_id: str = "admin-cli") -> tuple[str, str]: + resp = self.client.post_json( + "/users/refresh_token", + json={ + "grant_type": "refresh_token", + "client_id": client_id, + "refresh_token": refresh_token, + }, + ) + result = TokenResponse.model_validate(resp) + return result.access_token, result.refresh_token diff --git a/test_automation_framework/helpers/base_client/base_client.py b/test_automation_framework/helpers/base_client/base_client.py new file mode 100644 index 000000000..b21a77548 --- /dev/null +++ b/test_automation_framework/helpers/base_client/base_client.py @@ -0,0 +1,125 @@ +from __future__ import annotations +from typing import Any, Optional +import httpx +import time +import logging + +logger = logging.getLogger(__name__) + + +class HTTPError(RuntimeError): + def __init__( + self, + message: str, + status_code: Optional[int] = None, + body: Optional[str] = None, + ): + super().__init__(message) + self.status_code = status_code + self.body = body + + def __str__(self): + base = super().__str__() + if self.body: + return f"{base}\nResponse body: {self.body}" + return base + + +class BaseClient: + def __init__( + self, base_url: str, timeout: int = 30, token: Optional[str] = None, tenant: Optional[str] = None + ) -> None: + self.base_url = base_url.rstrip("/") + self.timeout = timeout + self._token = token + self._tenant = tenant + self._client = httpx.Client(base_url=self.base_url, timeout=self.timeout) + + def set_token(self, token: str | None) -> None: + self._token = token + + def set_tenant(self, tenant: str | None) -> None: + self._tenant = tenant + + def _default_headers(self, content_type_json: bool = False, extra: dict[str, str] | None = None) -> dict[str, str]: + headers: dict[str, str] = {} + if self._token: + headers["Authorization"] = f"Bearer {self._token}" + if self._tenant: + headers["X-Current-Tenant"] = self._tenant + if content_type_json: + headers["Content-Type"] = "application/json" + if extra: + headers.update(extra) + return headers + + def _request(self, method: str, path: str, headers: dict | None = 
None, **kwargs: Any) -> httpx.Response: + rel_path = path if path.startswith("/") else "/" + path + start = time.perf_counter() + merged_headers = {**self._default_headers(), **(headers or {})} + + # Log the request details for debugging + logger.debug(f"Making {method} request to {self.base_url}{rel_path}") + logger.debug(f"Headers: {merged_headers}") + if "json" in kwargs: + logger.debug(f"JSON payload: {kwargs['json']}") + + try: + resp = self._client.request(method, rel_path, headers=merged_headers, **kwargs) + resp.raise_for_status() + logger.debug( + f"HTTP {method} {self.base_url}{rel_path} -> {resp.status_code} in {time.perf_counter() - start:.3f}s" + ) + return resp + except httpx.HTTPStatusError as exc: + resp = exc.response + error_body = resp.text + logger.error( + f"Bad response: {resp.status_code} for {method} {self.base_url}{rel_path} - body: {error_body[:500]}" + ) + # Create a more informative error message + error_message = f"{method} {self.base_url}{rel_path} -> {resp.status_code}" + if error_body: + error_message += f"\nServer response: {error_body}" + + raise HTTPError( + error_message, + status_code=resp.status_code, + body=error_body, + ) from exc + except httpx.RequestError as exc: + logger.exception(f"Request failed: {method} {self.base_url}{rel_path}") + raise HTTPError(f"request failed: {method} {self.base_url}{rel_path}") from exc + + def get(self, path: str, **kwargs: Any) -> httpx.Response: + return self._request("GET", path, **kwargs) + + def post(self, path: str, **kwargs: Any) -> httpx.Response: + return self._request("POST", path, **kwargs) + + def put(self, path: str, **kwargs: Any) -> httpx.Response: + return self._request("PUT", path, **kwargs) + + def delete(self, path: str, **kwargs: Any) -> httpx.Response: + return self._request("DELETE", path, **kwargs) + + def get_json(self, path: str, headers: dict | None = None, **kwargs: Any) -> Any: + return self._request("GET", path, headers=headers, **kwargs).json() + + def post_json(self, path: str, headers: dict | None = None, **kwargs: Any) -> Any: + return self._request("POST", path, headers=headers, **kwargs).json() + + def put_json(self, path: str, headers: dict | None = None, **kwargs: Any) -> Any: + return self._request("PUT", path, headers=headers, **kwargs).json() + + def delete_json(self, path: str, headers: dict | None = None, **kwargs: Any) -> Any: + return self._request("DELETE", path, headers=headers, **kwargs).json() + + def close(self) -> None: + self._client.close() + + def __enter__(self) -> "BaseClient": + return self + + def __exit__(self, exc_type, exc, tb) -> None: + self.close() diff --git a/test_automation_framework/helpers/category/categories.py b/test_automation_framework/helpers/category/categories.py new file mode 100644 index 000000000..12760608e --- /dev/null +++ b/test_automation_framework/helpers/category/categories.py @@ -0,0 +1,113 @@ +from __future__ import annotations +from typing import List, Optional +from pydantic import BaseModel +import logging +from helpers.base_client.base_client import BaseClient + +logger = logging.getLogger(__name__) + + +class CategoryParent(BaseModel): + name: str + id: str + type: str + metadata: dict + parent: Optional[str] = None + data_attributes: List[dict] = [] + is_leaf: Optional[bool] = None + + +class Category(BaseModel): + id: str + name: str + type: str + metadata: dict + parent: Optional[str] = None + data_attributes: List[dict] = [] + parents: List[CategoryParent] = [] + is_leaf: bool + + +class Pagination(BaseModel): + page_num: 
int + page_offset: int + page_size: int + min_pages_left: int + total: int + has_more: bool + + +class CategoriesResponse(BaseModel): + pagination: Pagination + data: List[Category] + + +class CategoryCreateResponse(BaseModel): + id: str + name: str + type: str + metadata: dict + parent: Optional[str] = None + data_attributes: list[dict] = [] + editor: Optional[str] = None + parents: Optional[list[dict]] = None + is_leaf: Optional[bool] = None + + +class CategoriesClient(BaseClient): + def __init__(self, base_url: str, token: str, tenant: str) -> None: + super().__init__(base_url, token=token, tenant=tenant) + + def search_categories( + self, + page_num: int = 1, + page_size: int = 15, + filters: list[dict] | None = None, + sorting: list[dict] | None = None, + ) -> CategoriesResponse: + payload = { + "pagination": {"page_num": page_num, "page_size": page_size}, + "filters": filters or [], + "sorting": sorting or [{"direction": "desc", "field": "name"}], + } + + resp = self.post_json( + "/annotation/categories/search", + json=payload, + headers=self._default_headers(content_type_json=True), + ) + return CategoriesResponse.model_validate(resp) + + def create_category( + self, + category_id: str, + name: str, + category_type: str = "box", + parent: str | None = None, + metadata: dict | None = None, + data_attributes: list[dict] | None = None, + ) -> CategoryCreateResponse: + payload = { + "id": category_id, + "name": name, + "type": category_type, + "parent": parent, + "metadata": metadata or {"color": "#67DE61"}, + "data_attributes": data_attributes or [], + } + resp = self.post_json( + "/annotation/categories", + json=payload, + headers=self._default_headers(content_type_json=True), + ) + return CategoryCreateResponse.model_validate(resp) + + def delete_category(self, category_id: str) -> dict: + payload = {"id": category_id} + resp = self.delete_json( + "/annotation/categories", + json=payload, + headers=self._default_headers(content_type_json=True), + ) + logger.info(f"Deleted category {category_id}") + return resp diff --git a/test_automation_framework/helpers/constants.py b/test_automation_framework/helpers/constants.py new file mode 100644 index 000000000..fae538694 --- /dev/null +++ b/test_automation_framework/helpers/constants.py @@ -0,0 +1,2 @@ +AIRFLOW_PIPELINE = "airflow" +PRINT_PIPELINE = "print" diff --git a/test_automation_framework/helpers/datasets/dataset_client.py b/test_automation_framework/helpers/datasets/dataset_client.py new file mode 100644 index 000000000..653213a6e --- /dev/null +++ b/test_automation_framework/helpers/datasets/dataset_client.py @@ -0,0 +1,61 @@ +from __future__ import annotations + +from helpers.base_client.base_client import BaseClient +import logging + +logger = logging.getLogger(__name__) + + +class DatasetClient(BaseClient): + def __init__(self, base_url: str, token: str, tenant: str) -> None: + super().__init__(base_url, token=token, tenant=tenant) + + def search( + self, + page_num: int = 1, + page_size: int = 100, + filters: list[dict] | None = None, + sorting: list[dict] | None = None, + ) -> dict: + payload = { + "pagination": {"page_num": page_num, "page_size": page_size}, + "filters": filters or [], + "sorting": sorting or [{"direction": "asc", "field": "name"}], + } + return self.post_json( + "/assets/datasets/search", json=payload, headers=self._default_headers(content_type_json=True) + ) + + def search_files( + self, + dataset_id: int | None = None, + page_num: int = 1, + page_size: int = 15, + ) -> dict: + filters = [] + if dataset_id 
is not None: + filters.append({"field": "datasets.id", "operator": "eq", "value": dataset_id}) + else: + filters.append({"field": "original_name", "operator": "ilike", "value": "%%"}) + + payload = { + "pagination": {"page_num": page_num, "page_size": page_size}, + "filters": filters, + "sorting": [{"direction": "desc", "field": "last_modified"}], + } + + return self.post_json( + "/assets/files/search", json=payload, headers=self._default_headers(content_type_json=True) + ) + + def create_dataset(self, name: str) -> dict: + payload = {"name": name} + resp = self.post_json("/assets/datasets", json=payload, headers=self._default_headers(content_type_json=True)) + logger.info(f"Created dataset {name}") + return resp + + def delete_dataset(self, name: str) -> dict: + payload = {"name": name} + resp = self.delete_json("/assets/datasets", json=payload, headers=self._default_headers(content_type_json=True)) + logger.info(f"Deleted dataset {name}") + return resp diff --git a/test_automation_framework/helpers/files/file_client.py b/test_automation_framework/helpers/files/file_client.py new file mode 100644 index 000000000..ca7a33b95 --- /dev/null +++ b/test_automation_framework/helpers/files/file_client.py @@ -0,0 +1,93 @@ +from __future__ import annotations +from helpers.base_client.base_client import BaseClient +import logging +from typing import List +import shutil +import uuid +from pathlib import Path +import httpx +from helpers.base_client.base_client import HTTPError + +logger = logging.getLogger(__name__) + + +class FileClient(BaseClient): + def __init__(self, base_url: str, token: str, tenant: str) -> None: + super().__init__(base_url, token=token, tenant=tenant) + + def upload_file(self, file_path: str) -> dict: + with open(file_path, "rb") as f: + files = {"files": (file_path.split("/")[-1], f, "application/pdf")} + resp = self.post("/assets/files", files=files, headers=self._default_headers()) + logger.info(f"Uploaded file {file_path}") + return resp.json() + + def delete_files(self, ids: List[int]) -> dict: + resp = self.delete_json( + "/assets/files", + json={"objects": ids}, + headers=self._default_headers(content_type_json=True), + ) + logger.info(f"Deleted file {ids}") + return resp + + def search_files( + self, + page_num: int = 1, + page_size: int = 15, + filters: list[dict] | None = None, + ) -> dict: + payload = { + "pagination": {"page_num": page_num, "page_size": page_size}, + "filters": filters or [{"field": "original_name", "operator": "ilike", "value": "%%"}], + "sorting": [{"direction": "desc", "field": "last_modified"}], + } + return self.post_json( + "/assets/files/search", json=payload, headers=self._default_headers(content_type_json=True) + ) + + def move_files(self, name: str, objects: list) -> dict: + payload = {"name": name, "objects": objects} + resp = self.post_json( + "/assets/datasets/bonds", json=payload, headers=self._default_headers(content_type_json=True) + ) + logger.info(f"Moved object {objects} to the dataset {name}") + return resp + + @staticmethod + def upload_temp_file(client, file_tracker, tmp_path, suffix="pdf"): + data_dir = Path(__file__).parent.parent.parent / "data" + original_file = data_dir / "multivitamin.pdf" + unique_name = f"{uuid.uuid4().hex}.{suffix}" + temp_file = tmp_path / unique_name + shutil.copy(original_file, temp_file) + result = client.upload_file(str(temp_file)) + file_info = result[0] + assert file_info["status"] is True + file_tracker[0].append(file_info) + return file_info, temp_file + + def download_file(self, file_id: int) 
-> bytes: + resp = self._client.get( + f"{self.base_url}/assets/download?file_id={file_id}", + headers=self._default_headers(), + follow_redirects=False, + ) + + if resp.status_code >= 400: + raise HTTPError( + f"GET {resp.request.url} -> {resp.status_code}", + status_code=resp.status_code, + body=resp.text, + ) + + if resp.status_code == 302 and "location" in resp.headers: + s3_resp = httpx.get(resp.headers["location"]) + s3_resp.raise_for_status() + return s3_resp.content + + raise HTTPError( + f"Unexpected response {resp.status_code} for file_id={file_id}", + status_code=resp.status_code, + body=resp.text, + ) diff --git a/test_automation_framework/helpers/files/file_client_frontend.py b/test_automation_framework/helpers/files/file_client_frontend.py new file mode 100644 index 000000000..554724e93 --- /dev/null +++ b/test_automation_framework/helpers/files/file_client_frontend.py @@ -0,0 +1,56 @@ +import uuid +import shutil +from pathlib import Path +from playwright.sync_api import Page +from logging import getLogger +import time + +logger = getLogger(__name__) + + +class FrontendFileHelper: + @staticmethod + def prepare_temp_files(tmp_path, num_files=1, suffix="pdf", base_file="multivitamin.pdf"): + data_dir = Path(__file__).parent.parent.parent / "data" + original_file = data_dir / base_file + temp_files = [] + + for _ in range(num_files): + unique_name = f"{uuid.uuid4().hex}.{suffix}" + temp_file = tmp_path / unique_name + shutil.copy(original_file, temp_file) + temp_files.append(temp_file) + + return temp_files + + @staticmethod + def upload_files(page: Page, temp_files, file_tracker=None, client=None, timeout_seconds=30): + page.locator("input[type='file']").set_input_files([str(f) for f in temp_files]) + + page.get_by_role("button", name="Next").click() + success_msgs = page.locator("text=Successfully uploaded, converted") + end_time = time.time() + timeout_seconds + while success_msgs.count() < len(temp_files): + if time.time() > end_time: + raise RuntimeError(f"Not all upload success messages appeared within {timeout_seconds}s") + time.sleep(0.5) + + uploaded_infos = [] + + if file_tracker is not None and client is not None: + for temp_file in temp_files: + end_time = time.time() + timeout_seconds + while time.time() < end_time: + search_resp = client.search_files( + filters=[{"field": "original_name", "operator": "eq", "value": temp_file.name}] + ) + if search_resp["data"]: + file_info = search_resp["data"][0] + file_tracker[0].append(file_info) + uploaded_infos.append(file_info) + break + time.sleep(1) + else: + raise RuntimeError(f"Uploaded file {temp_file.name} not found in backend after {timeout_seconds}s") + + return uploaded_infos diff --git a/test_automation_framework/helpers/jobs/jobs_client.py b/test_automation_framework/helpers/jobs/jobs_client.py new file mode 100644 index 000000000..d99228002 --- /dev/null +++ b/test_automation_framework/helpers/jobs/jobs_client.py @@ -0,0 +1,118 @@ +from __future__ import annotations +from typing import Any, Dict, List +import time +import logging +from helpers.constants import AIRFLOW_PIPELINE, PRINT_PIPELINE + +from helpers.base_client.base_client import BaseClient + +logger = logging.getLogger(__name__) + + +class JobsClient(BaseClient): + def __init__(self, base_url: str, token: str, tenant: str) -> None: + super().__init__(base_url, token=token, tenant=tenant) + + def get_supported_pipelines(self) -> List[Dict[str, Any]]: + return self.get_json("/jobs/pipelines/support", headers=self._default_headers()) + + def 
get_pipeline(self, engine_resource: str) -> Dict[str, Any]: + return self.get_json(f"/jobs/pipelines/{engine_resource}", headers=self._default_headers()) + + def create_job( + self, + name: str, + file_ids: list[int], + owners: list[str], + pipeline_id: str = PRINT_PIPELINE, + pipeline_engine: str = AIRFLOW_PIPELINE, + datasets: list[int] | None = None, + categories: list[str] | None = None, + annotators: list[str] | None = None, + validators: list[str] | None = None, + previous_jobs: list[int] | None = None, + revisions: list[int] | None = None, + is_draft: bool = False, + is_auto_distribution: bool = False, + start_manual_job_automatically: bool = False, + job_type: str = "ExtractionJob", + pipeline_name: str | None = None, + ): + payload = { + "name": name, + "revisions": revisions or [], + "datasets": datasets or [], + "files": file_ids, + "previous_jobs": previous_jobs or [], + "type": job_type, + "is_draft": is_draft, + "is_auto_distribution": is_auto_distribution, + "start_manual_job_automatically": start_manual_job_automatically, + "categories": categories or [], + "owners": owners or [], + "annotators": annotators or [], + "validators": validators or [], + "pipeline_name": pipeline_name or pipeline_id, + "pipeline_id": pipeline_id, + "pipeline_engine": pipeline_engine, + } + + return self.post_json( + "/jobs/jobs/create_job", json=payload, headers=self._default_headers(content_type_json=True) + ) + + def search_jobs( + self, + page_num: int = 1, + page_size: int = 15, + ) -> dict: + payload = { + "pagination": {"page_num": page_num, "page_size": page_size}, + "sorting": [{"direction": "desc", "field": "creation_datetime"}], + } + return self.post_json("/jobs/jobs/search", json=payload, headers=self._default_headers(content_type_json=True)) + + def get_job(self, job_id: int) -> Dict[str, Any]: + return self.get_json(f"/jobs/jobs/{job_id}", headers=self._default_headers()) + + def get_progress(self, job_id: int) -> Dict[str, Any]: + return self.post_json( + "/jobs/jobs/progress", json=[job_id], headers=self._default_headers(content_type_json=True) + ) + + def poll_until_finished( + self, + job_id: int, + timeout_seconds: int = 120, + interval_seconds: float = 1.0, + backoff_factor: float = 1.5, + ) -> Dict[str, Any]: + start = time.monotonic() + current_interval = interval_seconds + + logger.info(f"Polling job {job_id} until finished (timeout {timeout_seconds}s)") + while True: + elapsed = time.monotonic() - start + if elapsed > timeout_seconds: + raise TimeoutError(f"Job {job_id} not finished after {timeout_seconds}s") + job_obj = self.get_job(job_id) + status = job_obj.get("status") or job_obj.get("data", {}).get("status") + logger.info(f"Polled job {job_id} status: {status}") + + if status and str(status).lower() in {"finished", "success", "completed"}: + logger.info(f"Job {job_id} finished with status={status}") + return job_obj + try: + progress = self.get_progress(job_id) + if isinstance(progress, dict): + for k, v in progress.items(): + if str(k) == str(job_id) and isinstance(v, dict): + fin = v.get("finished") + tot = v.get("total") + if fin is not None and tot is not None and fin >= tot: + logger.info("Progress shows job finished (finished>=total)") + return self.get_job(job_id) + except Exception: + logger.debug(f"Progress probe failed for job {job_id}; will retry") + time.sleep(current_interval) + current_interval = min(current_interval * backoff_factor, 10.0) diff --git a/test_automation_framework/helpers/menu/menu_client.py 
b/test_automation_framework/helpers/menu/menu_client.py new file mode 100644 index 000000000..9081dbc88 --- /dev/null +++ b/test_automation_framework/helpers/menu/menu_client.py @@ -0,0 +1,10 @@ +from __future__ import annotations +from helpers.base_client.base_client import BaseClient + + +class MenuClient(BaseClient): + def __init__(self, base_url: str, token: str, tenant: str) -> None: + super().__init__(base_url, token=token, tenant=tenant) + + def get_menu(self) -> list[dict]: + return self.get_json("/core/menu", headers=self._default_headers()) diff --git a/test_automation_framework/helpers/plugins/plugins_client.py b/test_automation_framework/helpers/plugins/plugins_client.py new file mode 100644 index 000000000..b6cf929df --- /dev/null +++ b/test_automation_framework/helpers/plugins/plugins_client.py @@ -0,0 +1,91 @@ +from typing import List +import logging +from helpers.base_client.base_client import BaseClient +from dataclasses import dataclass + + +@dataclass +class Plugin: + id: int + name: str + description: str + version: str + menu_name: str + url: str + tenant: str + is_autoinstalled: bool + is_iframe: bool + + +logger = logging.getLogger(__name__) + + +class PluginsClient(BaseClient): + def __init__(self, base_url: str, token: str, tenant: str) -> None: + super().__init__(base_url, token=token, tenant=tenant) + + def get_plugins(self) -> List[Plugin]: + raw = self.get_json("/core/plugins", headers=self._default_headers()) + return [Plugin(**item) for item in raw] + + def create_plugin( + self, + name: str, + menu_name: str, + url: str, + version: str = "1", + description: str = "", + is_iframe: bool = True, + ) -> Plugin: + payload = { + "name": name, + "menu_name": menu_name, + "description": description, + "version": version, + "url": url, + "is_iframe": is_iframe, + } + + headers = self._default_headers(content_type_json=True) + headers.update( + { + "Accept": "*/*", + "Accept-Encoding": "gzip, deflate", + "Accept-Language": "en-US,en;q=0.5", + "Connection": "keep-alive", + "DNT": "1", + "Origin": "http://demo.badgerdoc.com:8083", + "Priority": "u=0", + "Referer": "http://demo.badgerdoc.com:8083/", + "Sec-GPC": "1", + "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:138.0) Gecko/20100101 Firefox/138.0", + } + ) + + try: + resp = self.post_json( + "/core/plugins", + json=payload, + headers=headers, + ) + return Plugin(**resp) + except Exception as e: + logger.error(f"Failed to create plugin: {e}") + if hasattr(e, "body"): + logger.error(f"Response body: {e.body}") + raise + + def update_plugin(self, plugin_id: int, **fields) -> Plugin: + resp = self.put_json( + f"/core/plugins/{plugin_id}", + json=fields, + headers=self._default_headers(content_type_json=True), + ) + return Plugin(**resp) + + def delete_plugin(self, plugin_id: int) -> Plugin: + resp = self.delete_json( + f"/core/plugins/{plugin_id}", + headers=self._default_headers(content_type_json=True), + ) + return Plugin(**resp) diff --git a/test_automation_framework/helpers/reports/reports_client.py b/test_automation_framework/helpers/reports/reports_client.py new file mode 100644 index 000000000..69ea929ce --- /dev/null +++ b/test_automation_framework/helpers/reports/reports_client.py @@ -0,0 +1,30 @@ +from typing import List +import logging +from helpers.base_client.base_client import BaseClient + +logger = logging.getLogger(__name__) + + +class ReportsClient(BaseClient): + def __init__(self, base_url: str, token: str, tenant: str) -> None: + super().__init__(base_url, token=token, tenant=tenant) + 
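+ # Descriptive note (added): builds a payload from the given user ids and date range,
+ # POSTs it to the annotation service's /annotation/tasks/export endpoint, and returns
+ # the raw response body as text.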
+ def export_tasks( + self, + user_ids: List[str], + date_from: str, + date_to: str, + ) -> str: + payload = { + "user_ids": user_ids, + "date_from": date_from, + "date_to": date_to, + } + resp = self.post( + "/annotation/tasks/export", + json=payload, + headers=self._default_headers(content_type_json=True), + ) + resp.raise_for_status() + logger.info(f"Exported tasks for users={user_ids} from {date_from} to {date_to}") + return resp.text diff --git a/test_automation_framework/helpers/steps/jobs_creation.py b/test_automation_framework/helpers/steps/jobs_creation.py new file mode 100644 index 000000000..1af850316 --- /dev/null +++ b/test_automation_framework/helpers/steps/jobs_creation.py @@ -0,0 +1,337 @@ +import uuid +from playwright.sync_api import Page, expect +from helpers.files.file_client_frontend import FrontendFileHelper +from logging import getLogger +import datetime + +logger = getLogger(__name__) + + +def select_dataset(page: Page, dataset_type: str, dataset_name: str = None): + logger.info(f"Select dataset option: {dataset_type}") + if dataset_type == "none": + page.locator("label:has-text('No') div").nth(1).click() + elif dataset_type == "existing": + page.locator("label:has-text('Existing dataset') div").nth(1).click() + page.locator(".uui-icon.uui-enabled.uui-icon-dropdown").click() + page.get_by_text(dataset_name, exact=True).click() + elif dataset_type == "new": + page.locator("label:has-text('New dataset') div").nth(1).click() + page.get_by_role("textbox", name="Dataset name").fill(dataset_name) + else: + raise ValueError(f"Unknown dataset_type: {dataset_type}") + page.get_by_role("button", name="Next").click() + + +def select_preprocessor(page: Page, preprocessor: str = None, click_next: bool = True) -> None: + logger.info(f"Select preprocessor: {preprocessor or 'No need'}") + if preprocessor is None: + page.get_by_text("No need for preprocessor").click() + elif preprocessor == "any": + preprocessor_section = page.get_by_text("Select preprocessor").locator("..").locator("..") + preprocessor_section.locator("label").nth(1).click() + else: + page.get_by_text(preprocessor, exact=True).click() + if click_next: + page.get_by_role("button", name="Next").click() + + +def select_language(page: Page, language: str = None): + if language: + logger.info(f"Select language: {language}") + page.get_by_role("textbox", name="Please select").click() + page.get_by_text(language, exact=True).click() + page.get_by_role("button", name="Next").click() + + +def fill_job_and_start( + page: Page, jobs_client, job_name: str, pipeline_manager=None, pipeline="print", save_as_draft=False +): + job_name = job_name if job_name else f"test_job_{uuid.uuid4().hex[:8]}" + logger.info(f"Fill job name: {job_name}") + page.get_by_role("textbox", name="Job name").fill(job_name) + + if pipeline_manager: + logger.info(f"Select pipeline manager: {pipeline_manager}") + page.get_by_text(pipeline_manager).nth(1).click() + + logger.info("Select pipeline dropdown") + page.get_by_role("textbox", name="Select pipeline").click() + page.get_by_text(pipeline, exact=True).click() + + if save_as_draft: + logger.info("Save as draft") + page.get_by_role("button", name="Save as Draft").click() + else: + logger.info("Start extraction") + page.get_by_role("button", name="Start Extraction").click() + page.wait_for_url("**/jobs/**", timeout=20000) + jobs = jobs_client.search_jobs() + job_id = next((j["id"] for j in jobs["data"] if j["name"] == job_name), None) + assert job_id, f"Job with name {job_name} not found!" 
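+ # Descriptive note (added): for a submitted job, wait for the backend to report completion
+ # before checking that the UI shows "Finished"; a draft is only verified to show "Draft".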
+ if not save_as_draft: + jobs_client.poll_until_finished(job_id, timeout_seconds=180) + page.reload() + expect(page.get_by_text("Finished")).to_be_visible(timeout=10000) + else: + page.reload() + expect(page.get_by_text("Draft")).to_be_visible(timeout=10000) + + +def select_human_in_the_loop_and_start( + page: Page, + jobs_client, + job_name: str, + validation_type: str = "Cross validation", + day: str | None = None, + annotator: str = "admin", + categories: list[str] = None, + distribute_tasks: bool = False, +): + if not categories: + categories = ["Age"] + logger.info("Select Human in the loop") + page.get_by_role("tab", name="Human in the Loop").click() + + logger.info("Select validation type") + page.get_by_role("textbox", name="Select validation type").click() + page.get_by_text(validation_type, exact=True).click() + + page.get_by_role("textbox", name="DD/MM/YYYY").click() + if not day: + day = datetime.datetime.today().day + 1 + logger.info(f"Select date {day}") + page.get_by_text(str(day), exact=True).click() + + logger.info("Select annotator") + page.get_by_role("textbox", name="Select Annotators and").click() + page.get_by_role("listbox").get_by_text(annotator).click(force=True) + page.locator(".uui-input-box.-clickable.uui-focus").click() + + page.get_by_role("textbox", name="Select categories").click() + for category in categories: + logger.info(f"Select category: {category}") + page.get_by_text(category, exact=True).click() + + if distribute_tasks: + logger.info("Distribute annotation tasks") + page.get_by_text("Distribute annotation tasks").click() + + fill_job_and_start(page, jobs_client, job_name) + + +def prepare_files(page: Page, file_tracker, frontend_file_helper: FrontendFileHelper, tmp_path, num_files, client): + logger.info(f"Prepare {num_files} temp files") + temp_files = frontend_file_helper.prepare_temp_files(tmp_path, num_files=num_files) + files = frontend_file_helper.upload_files(page, temp_files, file_tracker=file_tracker, client=client) + return files + + +def select_files(page: Page, document_names): + logger.info(f"Select files: {document_names}") + for file_name in document_names: + row = page.locator(f"text={file_name}").first + print(f'clicking file "{file_name}"') + checkbox_label = row.locator("xpath=preceding-sibling::label") + checkbox_label.click(force=True) + + +def select_first_element(page: Page): + logger.info("Select first element in the table") + first_row = page.locator('div[role="row"]').nth(1) + checkbox = first_row.locator("label.uui-checkbox-container") + checkbox.click(force=True) + + +def run_upload_workflow( + page: Page, + frontend_file_helper: FrontendFileHelper, + num_files: int, + file_tracker, + client, + jobs_client, + dataset_type: str = "none", # "none", "existing", "new" + dataset_name: str = None, + tmp_path=None, + language: str = None, + preprocessor: str = None, + job_name: str = None, + human_in_loop: bool = False, + pipeline_manager: str = None, +): + logger.info("Open wizard") + page.get_by_role("button", name="Upload Wizard").click() + + prepare_files(page, file_tracker, frontend_file_helper, tmp_path, num_files, client) + + select_dataset(page, dataset_type, dataset_name) + + select_preprocessor(page, preprocessor=preprocessor, click_next=not language) + + select_language(page, language) + + if human_in_loop: + select_human_in_the_loop_and_start(page, jobs_client, job_name) + else: + fill_job_and_start(page, jobs_client, job_name=job_name, pipeline_manager=pipeline_manager) + + +def create_file_in_dataset( + 
dataset_tracker, + file_tracker, + tmp_path, + num_files, +): + created_datasets, dataset_client = dataset_tracker + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + dataset = dataset_client.create_dataset(name=dataset_name) + created_datasets.append(dataset_name) + assert "successfully created" in dataset["detail"].lower() + first_dataset_id = dataset_client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}])[ + "data" + ][0]["id"] + + created_files, client = file_tracker + for i in range(num_files): + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + file_id = file_info["id"] + move1 = client.move_files(name=dataset_name, objects=[file_id])[0] + assert move1["status"] is True + assert "successfully bounded" in move1["message"].lower() + files_in_first = dataset_client.search_files(dataset_id=first_dataset_id)["data"] + assert any(f["id"] == file_id for f in files_in_first) + files = [file["file_name"] for file in created_files] + return dataset_name, files + + +def run_new_job_documents_workflow( + page: Page, + num_files: int, + file_tracker, + jobs_client, + dataset_tracker, + tmp_path=None, + job_name: str = None, + human_in_loop: bool = False, + validation_type="Cross validation", + pipeline_manager: str = None, + distribute_tasks: bool = False, +): + dataset_name, files = create_file_in_dataset( + dataset_tracker=dataset_tracker, tmp_path=tmp_path, num_files=num_files, file_tracker=file_tracker + ) + + logger.info("Open wizard") + page.get_by_role("button", name="New job").click() + + select_files(page, files) + page.get_by_role("button", name="Next").click() + + if human_in_loop: + select_human_in_the_loop_and_start( + page, jobs_client, job_name, validation_type=validation_type, distribute_tasks=distribute_tasks + ) + else: + if pipeline_manager == "Other": + fill_job_and_start( + page, jobs_client, job_name=job_name, pipeline_manager=pipeline_manager, pipeline="AI by MCP" + ) + else: + fill_job_and_start(page, jobs_client, job_name=job_name, pipeline_manager=pipeline_manager) + + +def run_new_job_first_line_workflow( + page: Page, + num_files: int, + file_tracker, + jobs_client, + dataset_tracker, + tab_button, + tmp_path=None, + job_name: str = None, + human_in_loop: bool = False, + pipeline_manager: str = None, + save_as_draft: bool = False, +): + dataset_name, files = create_file_in_dataset( + dataset_tracker=dataset_tracker, tmp_path=tmp_path, num_files=num_files, file_tracker=file_tracker + ) + logger.info("Open wizard") + page.get_by_role("button", name="New job").click() + page.get_by_role("tab", name=tab_button, exact=True).click() + if tab_button == "Datasets": + select_files(page, [dataset_name]) + else: + select_first_element(page) + page.get_by_role("button", name="Next").click() + + if human_in_loop: + select_human_in_the_loop_and_start(page, jobs_client, job_name) + else: + fill_job_and_start( + page, jobs_client, job_name=job_name, pipeline_manager=pipeline_manager, save_as_draft=save_as_draft + ) + + +def run_new_job_multi_tab_workflow( + page: Page, + jobs_client, + file_tracker, + dataset_tracker, + tmp_path, + tabs: list[str], + num_files=1, + job_name: str = None, + human_in_loop: bool = False, + pipeline_manager: str = None, +): + logger.info(f"Preparing temp document for tabs: {tabs}") + dataset_name, files = create_file_in_dataset( + dataset_tracker=dataset_tracker, tmp_path=tmp_path, num_files=num_files, file_tracker=file_tracker + ) + + logger.info("Open wizard") + page.get_by_role("button", 
name="New job").click() + + for tab in tabs: + logger.info(f"Go to tab: {tab}") + page.get_by_role("tab", name=tab, exact=True).click() + + if tab == "Documents": + select_files(page, files) + else: + select_first_element(page) + + page.get_by_role("button", name="Next").click() + + if human_in_loop: + select_human_in_the_loop_and_start(page, jobs_client, job_name) + else: + fill_job_and_start(page, jobs_client, job_name=job_name, pipeline_manager=pipeline_manager) + + +def run_new_job_dataset_without_documents_workflow( + page: Page, + jobs_client, + dataset_tracker, + job_name: str = None, + human_in_loop: bool = False, + pipeline_manager: str = None, +): + created_datasets, dataset_client = dataset_tracker + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + dataset = dataset_client.create_dataset(name=dataset_name) + created_datasets.append(dataset_name) + assert "successfully created" in dataset["detail"].lower() + + logger.info("Open wizard") + page.get_by_role("button", name="New job").click() + page.get_by_role("tab", name="Datasets", exact=True).click() + + select_files(page, [dataset_name]) + page.get_by_role("button", name="Next").click() + + if human_in_loop: + select_human_in_the_loop_and_start(page, jobs_client, job_name) + else: + fill_job_and_start(page, jobs_client, job_name=job_name, pipeline_manager=pipeline_manager) diff --git a/test_automation_framework/helpers/users/users.py b/test_automation_framework/helpers/users/users.py new file mode 100644 index 000000000..3222d371f --- /dev/null +++ b/test_automation_framework/helpers/users/users.py @@ -0,0 +1,37 @@ +from __future__ import annotations +from typing import Any, Dict, List, Optional +from pydantic import BaseModel + +from helpers.base_client.base_client import BaseClient + + +class UserAccess(BaseModel): + manageGroupMembership: bool + view: bool + mapRoles: bool + impersonate: bool + manage: bool + + +class UserResponse(BaseModel): + id: str + username: str + enabled: bool + email: Optional[str] = None + emailVerified: Optional[bool] = None + firstName: Optional[str] = None + lastName: Optional[str] = None + attributes: Optional[Dict[str, Any]] = None + access: Optional[UserAccess] = None + + +class UsersClient(BaseClient): + def __init__(self, base_url: str, token: str, tenant: str) -> None: + super().__init__(base_url, token=token, tenant=tenant) + + def search_users(self, filters: Optional[List[Dict[str, Any]]] = None) -> List[UserResponse]: + payload = {"filters": filters or []} + resp = self.post_json( + "/users/users/search", json=payload, headers=self._default_headers(content_type_json=True) + ) + return [UserResponse.model_validate(u) for u in resp] diff --git a/test_automation_framework/pdm.lock b/test_automation_framework/pdm.lock new file mode 100644 index 000000000..87dac6fc8 --- /dev/null +++ b/test_automation_framework/pdm.lock @@ -0,0 +1,585 @@ +# This file is @generated by PDM. +# It is not intended for manual editing. 
+ +[metadata] +groups = ["default"] +strategy = ["inherit_metadata"] +lock_version = "4.5.0" +content_hash = "sha256:dae4296a33b08d8e1097eca715ab509b87a4de17a1323c1c611b25724b9b5cf5" + +[[metadata.targets]] +requires_python = "==3.13.*" + +[[package]] +name = "annotated-types" +version = "0.7.0" +requires_python = ">=3.8" +summary = "Reusable constraint types to use with typing.Annotated" +groups = ["default"] +dependencies = [ + "typing-extensions>=4.0.0; python_version < \"3.9\"", +] +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[[package]] +name = "anyio" +version = "4.10.0" +requires_python = ">=3.9" +summary = "High-level concurrency and networking framework on top of asyncio or Trio" +groups = ["default"] +dependencies = [ + "exceptiongroup>=1.0.2; python_version < \"3.11\"", + "idna>=2.8", + "sniffio>=1.1", + "typing-extensions>=4.5; python_version < \"3.13\"", +] +files = [ + {file = "anyio-4.10.0-py3-none-any.whl", hash = "sha256:60e474ac86736bbfd6f210f7a61218939c318f43f9972497381f1c5e930ed3d1"}, + {file = "anyio-4.10.0.tar.gz", hash = "sha256:3f3fae35c96039744587aa5b8371e7e8e603c0702999535961dd336026973ba6"}, +] + +[[package]] +name = "certifi" +version = "2025.8.3" +requires_python = ">=3.7" +summary = "Python package for providing Mozilla's CA Bundle." +groups = ["default"] +files = [ + {file = "certifi-2025.8.3-py3-none-any.whl", hash = "sha256:f6c12493cfb1b06ba2ff328595af9350c65d6644968e5d3a2ffd78699af217a5"}, + {file = "certifi-2025.8.3.tar.gz", hash = "sha256:e564105f78ded564e3ae7c923924435e1daa7463faeab5bb932bc53ffae63407"}, +] + +[[package]] +name = "cfgv" +version = "3.4.0" +requires_python = ">=3.8" +summary = "Validate configuration and produce human readable error messages." +groups = ["default"] +files = [ + {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"}, + {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.3" +requires_python = ">=3.7" +summary = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+groups = ["default"] +files = [ + {file = "charset_normalizer-3.4.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:14c2a87c65b351109f6abfc424cab3927b3bdece6f706e4d12faaf3d52ee5efe"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41d1fc408ff5fdfb910200ec0e74abc40387bccb3252f3f27c0676731df2b2c8"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1bb60174149316da1c35fa5233681f7c0f9f514509b8e399ab70fea5f17e45c9"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:30d006f98569de3459c2fc1f2acde170b7b2bd265dc1943e87e1a4efe1b67c31"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:416175faf02e4b0810f1f38bcb54682878a4af94059a1cd63b8747244420801f"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6aab0f181c486f973bc7262a97f5aca3ee7e1437011ef0c2ec04b5a11d16c927"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabf8315679312cfa71302f9bd509ded4f2f263fb5b765cf1433b39106c3cc9"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:bd28b817ea8c70215401f657edef3a8aa83c29d447fb0b622c35403780ba11d5"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:18343b2d246dc6761a249ba1fb13f9ee9a2bcd95decc767319506056ea4ad4dc"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-win32.whl", hash = "sha256:6fb70de56f1859a3f71261cbe41005f56a7842cc348d3aeb26237560bfa5e0ce"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-win_amd64.whl", hash = "sha256:cf1ebb7d78e1ad8ec2a8c4732c7be2e736f6e5123a4146c5b89c9d1f585f8cef"}, + {file = "charset_normalizer-3.4.3-py3-none-any.whl", hash = "sha256:ce571ab16d890d23b5c278547ba694193a45011ff86a9162a71307ed9f86759a"}, + {file = "charset_normalizer-3.4.3.tar.gz", hash = "sha256:6fce4b8500244f6fcb71465d4a4930d132ba9ab8e71a7859e6a5d59851068d14"}, +] + +[[package]] +name = "colorama" +version = "0.4.6" +requires_python = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +summary = "Cross-platform colored terminal text." +groups = ["default"] +marker = "sys_platform == \"win32\"" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "distlib" +version = "0.4.0" +summary = "Distribution utilities" +groups = ["default"] +files = [ + {file = "distlib-0.4.0-py2.py3-none-any.whl", hash = "sha256:9659f7d87e46584a30b5780e43ac7a2143098441670ff0a49d5f9034c54a6c16"}, + {file = "distlib-0.4.0.tar.gz", hash = "sha256:feec40075be03a04501a973d81f633735b4b69f98b05450592310c0f401a4e0d"}, +] + +[[package]] +name = "dotenv" +version = "0.9.9" +summary = "Deprecated package" +groups = ["default"] +dependencies = [ + "python-dotenv", +] +files = [ + {file = "dotenv-0.9.9-py2.py3-none-any.whl", hash = "sha256:29cf74a087b31dafdb5a446b6d7e11cbce8ed2741540e2339c69fbef92c94ce9"}, +] + +[[package]] +name = "filelock" +version = "3.19.1" +requires_python = ">=3.9" +summary = "A platform independent file lock." 
+groups = ["default"] +files = [ + {file = "filelock-3.19.1-py3-none-any.whl", hash = "sha256:d38e30481def20772f5baf097c122c3babc4fcdb7e14e57049eb9d88c6dc017d"}, + {file = "filelock-3.19.1.tar.gz", hash = "sha256:66eda1888b0171c998b35be2bcc0f6d75c388a7ce20c3f3f37aa8e96c2dddf58"}, +] + +[[package]] +name = "greenlet" +version = "3.2.4" +requires_python = ">=3.9" +summary = "Lightweight in-process concurrent programming" +groups = ["default"] +files = [ + {file = "greenlet-3.2.4-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:1a921e542453fe531144e91e1feedf12e07351b1cf6c9e8a3325ea600a715a31"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd3c8e693bff0fff6ba55f140bf390fa92c994083f838fece0f63be121334945"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:710638eb93b1fa52823aa91bf75326f9ecdfd5e0466f00789246a5280f4ba0fc"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c5111ccdc9c88f423426df3fd1811bfc40ed66264d35aa373420a34377efc98a"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d76383238584e9711e20ebe14db6c88ddcedc1829a9ad31a584389463b5aa504"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:23768528f2911bcd7e475210822ffb5254ed10d71f4028387e5a99b4c6699671"}, + {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:00fadb3fedccc447f517ee0d3fd8fe49eae949e1cd0f6a611818f4f6fb7dc83b"}, + {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d25c5091190f2dc0eaa3f950252122edbbadbb682aa7b1ef2f8af0f8c0afefae"}, + {file = "greenlet-3.2.4-cp313-cp313-win_amd64.whl", hash = "sha256:554b03b6e73aaabec3745364d6239e9e012d64c68ccd0b8430c64ccc14939a8b"}, + {file = "greenlet-3.2.4.tar.gz", hash = "sha256:0dca0d95ff849f9a364385f36ab49f50065d76964944638be9691e1832e9f86d"}, +] + +[[package]] +name = "h11" +version = "0.16.0" +requires_python = ">=3.8" +summary = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +groups = ["default"] +files = [ + {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"}, + {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"}, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +requires_python = ">=3.8" +summary = "A minimal low-level HTTP client." +groups = ["default"] +dependencies = [ + "certifi", + "h11>=0.16", +] +files = [ + {file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"}, + {file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"}, +] + +[[package]] +name = "httpx" +version = "0.28.1" +requires_python = ">=3.8" +summary = "The next generation HTTP client." 
+groups = ["default"] +dependencies = [ + "anyio", + "certifi", + "httpcore==1.*", + "idna", +] +files = [ + {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, + {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, +] + +[[package]] +name = "identify" +version = "2.6.13" +requires_python = ">=3.9" +summary = "File identification library for Python" +groups = ["default"] +files = [ + {file = "identify-2.6.13-py2.py3-none-any.whl", hash = "sha256:60381139b3ae39447482ecc406944190f690d4a2997f2584062089848361b33b"}, + {file = "identify-2.6.13.tar.gz", hash = "sha256:da8d6c828e773620e13bfa86ea601c5a5310ba4bcd65edf378198b56a1f9fb32"}, +] + +[[package]] +name = "idna" +version = "3.10" +requires_python = ">=3.6" +summary = "Internationalized Domain Names in Applications (IDNA)" +groups = ["default"] +files = [ + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, +] + +[[package]] +name = "iniconfig" +version = "2.1.0" +requires_python = ">=3.8" +summary = "brain-dead simple config-ini parsing" +groups = ["default"] +files = [ + {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, + {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, +] + +[[package]] +name = "nodeenv" +version = "1.9.1" +requires_python = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +summary = "Node.js virtual environment builder" +groups = ["default"] +files = [ + {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, + {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, +] + +[[package]] +name = "packaging" +version = "25.0" +requires_python = ">=3.8" +summary = "Core utilities for Python packages" +groups = ["default"] +files = [ + {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, + {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, +] + +[[package]] +name = "platformdirs" +version = "4.3.8" +requires_python = ">=3.9" +summary = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
+groups = ["default"] +files = [ + {file = "platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4"}, + {file = "platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc"}, +] + +[[package]] +name = "playwright" +version = "1.55.0" +requires_python = ">=3.9" +summary = "A high-level API to automate web browsers" +groups = ["default"] +dependencies = [ + "greenlet<4.0.0,>=3.1.1", + "pyee<14,>=13", +] +files = [ + {file = "playwright-1.55.0-py3-none-macosx_10_13_x86_64.whl", hash = "sha256:d7da108a95001e412effca4f7610de79da1637ccdf670b1ae3fdc08b9694c034"}, + {file = "playwright-1.55.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:8290cf27a5d542e2682ac274da423941f879d07b001f6575a5a3a257b1d4ba1c"}, + {file = "playwright-1.55.0-py3-none-macosx_11_0_universal2.whl", hash = "sha256:25b0d6b3fd991c315cca33c802cf617d52980108ab8431e3e1d37b5de755c10e"}, + {file = "playwright-1.55.0-py3-none-manylinux1_x86_64.whl", hash = "sha256:c6d4d8f6f8c66c483b0835569c7f0caa03230820af8e500c181c93509c92d831"}, + {file = "playwright-1.55.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29a0777c4ce1273acf90c87e4ae2fe0130182100d99bcd2ae5bf486093044838"}, + {file = "playwright-1.55.0-py3-none-win32.whl", hash = "sha256:29e6d1558ad9d5b5c19cbec0a72f6a2e35e6353cd9f262e22148685b86759f90"}, + {file = "playwright-1.55.0-py3-none-win_amd64.whl", hash = "sha256:7eb5956473ca1951abb51537e6a0da55257bb2e25fc37c2b75af094a5c93736c"}, + {file = "playwright-1.55.0-py3-none-win_arm64.whl", hash = "sha256:012dc89ccdcbd774cdde8aeee14c08e0dd52ddb9135bf10e9db040527386bd76"}, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +requires_python = ">=3.9" +summary = "plugin and hook calling mechanisms for python" +groups = ["default"] +files = [ + {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, + {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, +] + +[[package]] +name = "pre-commit" +version = "4.3.0" +requires_python = ">=3.9" +summary = "A framework for managing and maintaining multi-language pre-commit hooks." 
+groups = ["default"] +dependencies = [ + "cfgv>=2.0.0", + "identify>=1.0.0", + "nodeenv>=0.11.1", + "pyyaml>=5.1", + "virtualenv>=20.10.0", +] +files = [ + {file = "pre_commit-4.3.0-py2.py3-none-any.whl", hash = "sha256:2b0747ad7e6e967169136edffee14c16e148a778a54e4f967921aa1ebf2308d8"}, + {file = "pre_commit-4.3.0.tar.gz", hash = "sha256:499fe450cc9d42e9d58e606262795ecb64dd05438943c62b66f6a8673da30b16"}, +] + +[[package]] +name = "pydantic" +version = "2.11.7" +requires_python = ">=3.9" +summary = "Data validation using Python type hints" +groups = ["default"] +dependencies = [ + "annotated-types>=0.6.0", + "pydantic-core==2.33.2", + "typing-extensions>=4.12.2", + "typing-inspection>=0.4.0", +] +files = [ + {file = "pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b"}, + {file = "pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db"}, +] + +[[package]] +name = "pydantic-core" +version = "2.33.2" +requires_python = ">=3.9" +summary = "Core functionality for Pydantic validation and serialization" +groups = ["default"] +dependencies = [ + "typing-extensions!=4.7.0,>=4.6.0", +] +files = [ + {file = "pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f"}, + {file = "pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = 
"sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9"}, + {file = "pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc"}, +] + +[[package]] +name = "pydantic-settings" +version = "2.10.1" +requires_python = ">=3.9" +summary = "Settings management using Pydantic" +groups = ["default"] +dependencies = [ + "pydantic>=2.7.0", + "python-dotenv>=0.21.0", + "typing-inspection>=0.4.0", +] +files = [ + {file = "pydantic_settings-2.10.1-py3-none-any.whl", hash = "sha256:a60952460b99cf661dc25c29c0ef171721f98bfcb52ef8d9ea4c943d7c8cc796"}, + {file = "pydantic_settings-2.10.1.tar.gz", hash = "sha256:06f0062169818d0f5524420a360d632d5857b83cffd4d42fe29597807a1614ee"}, +] + +[[package]] +name = "pyee" +version = "13.0.0" +requires_python = ">=3.8" +summary = "A rough port of Node.js's EventEmitter to Python with a few tricks of its own" +groups = ["default"] +dependencies = [ + "typing-extensions", +] +files = [ + {file = "pyee-13.0.0-py3-none-any.whl", hash = "sha256:48195a3cddb3b1515ce0695ed76036b5ccc2ef3a9f963ff9f77aec0139845498"}, + {file = "pyee-13.0.0.tar.gz", hash = "sha256:b391e3c5a434d1f5118a25615001dbc8f669cf410ab67d04c4d4e07c55481c37"}, +] + +[[package]] +name = "pygments" +version = "2.19.2" +requires_python = ">=3.8" +summary = "Pygments is a syntax highlighting package written in Python." +groups = ["default"] +files = [ + {file = "pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b"}, + {file = "pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887"}, +] + +[[package]] +name = "pytest" +version = "8.4.1" +requires_python = ">=3.9" +summary = "pytest: simple powerful testing with Python" +groups = ["default"] +dependencies = [ + "colorama>=0.4; sys_platform == \"win32\"", + "exceptiongroup>=1; python_version < \"3.11\"", + "iniconfig>=1", + "packaging>=20", + "pluggy<2,>=1.5", + "pygments>=2.7.2", + "tomli>=1; python_version < \"3.11\"", +] +files = [ + {file = "pytest-8.4.1-py3-none-any.whl", hash = "sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7"}, + {file = "pytest-8.4.1.tar.gz", hash = "sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c"}, +] + +[[package]] +name = "pytest-base-url" +version = "2.1.0" +requires_python = ">=3.8" +summary = "pytest plugin for URL based testing" +groups = ["default"] +dependencies = [ + "pytest>=7.0.0", + "requests>=2.9", +] +files = [ + {file = "pytest_base_url-2.1.0-py3-none-any.whl", hash = "sha256:3ad15611778764d451927b2a53240c1a7a591b521ea44cebfe45849d2d2812e6"}, + {file = "pytest_base_url-2.1.0.tar.gz", hash = "sha256:02748589a54f9e63fcbe62301d6b0496da0d10231b753e950c63e03aee745d45"}, +] + +[[package]] +name = "pytest-playwright" +version = "0.7.0" +requires_python = ">=3.9" +summary = "A pytest wrapper with fixtures for Playwright to automate web browsers" +groups = ["default"] +dependencies = [ + "playwright>=1.18", + "pytest-base-url<3.0.0,>=1.0.0", + "pytest<9.0.0,>=6.2.4", + "python-slugify<9.0.0,>=6.0.0", +] +files = [ + {file = "pytest_playwright-0.7.0-py3-none-any.whl", hash = 
"sha256:2516d0871fa606634bfe32afbcc0342d68da2dbff97fe3459849e9c428486da2"}, + {file = "pytest_playwright-0.7.0.tar.gz", hash = "sha256:b3f2ea514bbead96d26376fac182f68dcd6571e7cb41680a89ff1673c05d60b6"}, +] + +[[package]] +name = "python-dotenv" +version = "1.1.1" +requires_python = ">=3.9" +summary = "Read key-value pairs from a .env file and set them as environment variables" +groups = ["default"] +files = [ + {file = "python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc"}, + {file = "python_dotenv-1.1.1.tar.gz", hash = "sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab"}, +] + +[[package]] +name = "python-slugify" +version = "8.0.4" +requires_python = ">=3.7" +summary = "A Python slugify application that also handles Unicode" +groups = ["default"] +dependencies = [ + "text-unidecode>=1.3", +] +files = [ + {file = "python-slugify-8.0.4.tar.gz", hash = "sha256:59202371d1d05b54a9e7720c5e038f928f45daaffe41dd10822f3907b937c856"}, + {file = "python_slugify-8.0.4-py2.py3-none-any.whl", hash = "sha256:276540b79961052b66b7d116620b36518847f52d5fd9e3a70164fc8c50faa6b8"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +requires_python = ">=3.8" +summary = "YAML parser and emitter for Python" +groups = ["default"] +files = [ + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + +[[package]] +name = "requests" +version = "2.32.5" +requires_python = ">=3.9" +summary = "Python HTTP for Humans." 
+groups = ["default"] +dependencies = [ + "certifi>=2017.4.17", + "charset-normalizer<4,>=2", + "idna<4,>=2.5", + "urllib3<3,>=1.21.1", +] +files = [ + {file = "requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6"}, + {file = "requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf"}, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +requires_python = ">=3.7" +summary = "Sniff out which async library your code is running under" +groups = ["default"] +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + +[[package]] +name = "text-unidecode" +version = "1.3" +summary = "The most basic Text::Unidecode port" +groups = ["default"] +files = [ + {file = "text-unidecode-1.3.tar.gz", hash = "sha256:bad6603bb14d279193107714b288be206cac565dfa49aa5b105294dd5c4aab93"}, + {file = "text_unidecode-1.3-py2.py3-none-any.whl", hash = "sha256:1311f10e8b895935241623731c2ba64f4c455287888b18189350b67134a822e8"}, +] + +[[package]] +name = "typing-extensions" +version = "4.14.1" +requires_python = ">=3.9" +summary = "Backported and Experimental Type Hints for Python 3.9+" +groups = ["default"] +files = [ + {file = "typing_extensions-4.14.1-py3-none-any.whl", hash = "sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76"}, + {file = "typing_extensions-4.14.1.tar.gz", hash = "sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36"}, +] + +[[package]] +name = "typing-inspection" +version = "0.4.1" +requires_python = ">=3.9" +summary = "Runtime typing introspection tools" +groups = ["default"] +dependencies = [ + "typing-extensions>=4.12.0", +] +files = [ + {file = "typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51"}, + {file = "typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28"}, +] + +[[package]] +name = "urllib3" +version = "2.5.0" +requires_python = ">=3.9" +summary = "HTTP library with thread-safe connection pooling, file post, and more." 
+groups = ["default"] +files = [ + {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"}, + {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"}, +] + +[[package]] +name = "virtualenv" +version = "20.34.0" +requires_python = ">=3.8" +summary = "Virtual Python Environment builder" +groups = ["default"] +dependencies = [ + "distlib<1,>=0.3.7", + "filelock<4,>=3.12.2", + "importlib-metadata>=6.6; python_version < \"3.8\"", + "platformdirs<5,>=3.9.1", + "typing-extensions>=4.13.2; python_version < \"3.11\"", +] +files = [ + {file = "virtualenv-20.34.0-py3-none-any.whl", hash = "sha256:341f5afa7eee943e4984a9207c025feedd768baff6753cd660c857ceb3e36026"}, + {file = "virtualenv-20.34.0.tar.gz", hash = "sha256:44815b2c9dee7ed86e387b842a84f20b93f7f417f95886ca1996a72a4138eb1a"}, +] diff --git a/test_automation_framework/pyproject.toml b/test_automation_framework/pyproject.toml new file mode 100644 index 000000000..833a1d452 --- /dev/null +++ b/test_automation_framework/pyproject.toml @@ -0,0 +1,15 @@ +[project] +name = "badgerdoc_taf" +version = "0.1.0" +description = "Default template for PDM package" +authors = [ + {name = "asobolev", email = "aleksei_sobolev@epam.com"}, +] +dependencies = ["PyYAML==6.0.2", "dotenv==0.9.9", "httpx==0.28.1", "pre-commit==4.3.0", "pydantic-settings==2.10.1", "pydantic==2.11.7", "pytest==8.4.1", "playwright==1.55.0", "pytest-playwright>=0.7.0"] +requires-python = "==3.13.*" +readme = "README.md" +license = {text = "MIT"} + + +[tool.pdm] +distribution = false diff --git a/test_automation_framework/settings.py b/test_automation_framework/settings.py new file mode 100644 index 000000000..5e1174c1f --- /dev/null +++ b/test_automation_framework/settings.py @@ -0,0 +1,37 @@ +import yaml +from pathlib import Path +from pydantic_settings import BaseSettings, SettingsConfigDict +from pydantic import SecretStr + +ROOT = Path(__file__).parent +DEFAULTS_PATH = ROOT / "config" / "defaults.yaml" + + +class Settings(BaseSettings): + BASE_URL: str + BASE_PORT: int + API_USER: str + API_PASS: SecretStr + TIMEOUT_SECONDS: int = 30 + MAX_WORKERS: int = 4 + USE_MOCK_LLM: bool = True + LOG_LEVEL: str = "INFO" + LLM_API_KEY: str | None = None + + model_config = SettingsConfigDict(env_file=".env", env_file_encoding="utf-8") + + +def load_settings() -> Settings: + with open(DEFAULTS_PATH, "r") as f: + yaml_defaults = yaml.safe_load(f) + + from dotenv import dotenv_values + + env_data = dotenv_values(".env") + + merged = { + **yaml_defaults, + **{k: v for k, v in env_data.items() if v is not None}, + } + + return Settings(**merged) diff --git a/test_automation_framework/tests/test_auth.py b/test_automation_framework/tests/test_auth.py new file mode 100644 index 000000000..a8601873e --- /dev/null +++ b/test_automation_framework/tests/test_auth.py @@ -0,0 +1,26 @@ +from logging import getLogger + +import pytest + + +from helpers.base_client.base_client import HTTPError + +logger = getLogger(__name__) + + +class TestAuthAPI: + def test_basic_auth(self, auth_token): + access_token, refresh_token = auth_token + assert access_token + assert refresh_token + + def test_wrong_creds(self, auth_service): + with pytest.raises(HTTPError) as exc: + auth_service.get_token("wrong", "wrong") + assert exc.value.status_code == 401 + + def test_refresh_token(self, auth_token, auth_service): + access_token, refresh_token = auth_token + new_access, new_refresh = 
auth_service.refresh_token(refresh_token=refresh_token) + assert new_access != access_token + assert new_refresh != refresh_token diff --git a/test_automation_framework/tests/test_categories.py b/test_automation_framework/tests/test_categories.py new file mode 100644 index 000000000..2a064b49f --- /dev/null +++ b/test_automation_framework/tests/test_categories.py @@ -0,0 +1,124 @@ +from logging import getLogger +import uuid +from playwright.sync_api import Page, expect + + +import pytest + +logger = getLogger(__name__) + + +class TestCategories: + @pytest.mark.skip(reason="Creation works, but deletion not implemented, will be cluttered by multiple runs") + def test_create_and_delete_category(self, auth_token, settings, tenant, categories_client): + access_token, _ = auth_token + + unique_id = f"test_cat_{uuid.uuid4().hex[:6]}" + created = categories_client.create_category(category_id=unique_id, name=unique_id, parent="example") + assert created.id == unique_id + search_result = categories_client.search_categories(page_size=100) + ids = [c.id for c in search_result.data] + assert unique_id in ids, f"Category {unique_id} not found after creation" + + deleted = categories_client.delete_category(unique_id) + assert deleted.get("detail") or deleted.get("status") or "success" in str(deleted).lower() + search_after_delete = categories_client.search_categories(page_size=100) + ids_after = [c.id for c in search_after_delete.data] + assert unique_id not in ids_after, f"Category {unique_id} still present after deletion" + + +class TestCategoriesFrontend: + def test_categories_scroll(self, categories_page: Page): + page = categories_page + + page_size_container = page.locator("div:has(> div > span:has-text('Show on page'))") + page_size_input = page_size_container.locator("input[aria-haspopup='true']") + page_size_input.click() + page.locator("div[role='option']", has_text="100").click() + + rows = page.locator("div[role='row']").filter(has_not=page.locator("div[role='columnheader']")) + + last_row = rows.last + last_row.scroll_into_view_if_needed() + expect(last_row).to_be_visible() + + first_row = rows.first + first_row.scroll_into_view_if_needed() + expect(first_row).to_be_visible() + + def test_categories_pagination_by_page_number(self, categories_page: Page): + page = categories_page + nav = page.locator('nav[role="navigation"]') + nav.wait_for(state="visible", timeout=10000) + + rows = page.locator("div[role='row']").filter(has_not=page.locator("div[role='columnheader']")) + first_row = rows.first + expect(first_row).to_be_visible(timeout=10000) + + old_text = first_row.text_content() + + nav.get_by_role("button", name="2", exact=True).click() + + try: + expect(nav.get_by_role("button", name="2")).to_have_attribute("aria-current", "true", timeout=10000) + except AssertionError: + expect(rows.first).not_to_have_text(old_text, timeout=10000) + + active_attr = nav.get_by_role("button", name="2").get_attribute("aria-current") + assert active_attr == "true" or rows.first.text_content() != old_text + + def test_categories_pagination_by_arrows(self, categories_page: Page): + page = categories_page + + nav = page.locator('nav[role="navigation"]') + nav.wait_for(state="visible", timeout=10000) + + rows = page.locator("div[role='row']").filter(has_not=page.locator("div[role='columnheader']")) + first_row = rows.first + expect(first_row).to_be_visible(timeout=10000) + + old_text = first_row.text_content() + + nav.locator("button").last.click() + try: + expect(nav.get_by_role("button", name="2", 
exact=True)).to_have_attribute( + "aria-current", "true", timeout=10000 + ) + except AssertionError: + expect(rows.first).not_to_have_text(old_text, timeout=10000) + + old_text_back = rows.first.text_content() + nav.locator("button").first.click() + try: + expect(nav.get_by_role("button", name="1", exact=True)).to_have_attribute( + "aria-current", "true", timeout=10000 + ) + except AssertionError: + expect(rows.first).not_to_have_text(old_text_back, timeout=10000) + + active_attr_1 = nav.get_by_role("button", name="1", exact=True).get_attribute("aria-current") + assert active_attr_1 == "true" or rows.first.text_content() != old_text_back + + def test_categories_show_on_page(self, categories_page: Page): + page = categories_page + + rows = page.locator("div[role='row']").locator("xpath=..").locator("div[role='row']:not(.uui-table-header-row)") + + page_size_container = page.locator("div:has(> div > span:has-text('Show on page'))") + page_size_input = page_size_container.locator("input[aria-haspopup='true']") + + page_size_input.click() + options = page.locator("div[role='option']") + option_texts = [options.nth(i).inner_text() for i in range(options.count())] + page_size_input.click() + + for value in option_texts: + page_size_input.click() + + option = page.locator("div[role='option']", has_text=value).first + option.wait_for(state="visible", timeout=5000) + option.click() + + expect(rows.first).to_be_visible(timeout=10000) + count = rows.count() + assert count <= int(value), f"Expected at most {value} rows, got {count}" diff --git a/test_automation_framework/tests/test_datasets.py b/test_automation_framework/tests/test_datasets.py new file mode 100644 index 000000000..f81e2efe2 --- /dev/null +++ b/test_automation_framework/tests/test_datasets.py @@ -0,0 +1,249 @@ +from logging import getLogger +import uuid +from playwright.sync_api import Page, expect + +import pytest + + +from helpers.base_client.base_client import HTTPError + +logger = getLogger(__name__) + + +class TestDatasets: + def test_clear_search_for_datasets(self, dataset_client): + result = dataset_client.search() + assert "pagination" in result + assert "data" in result + assert isinstance(result["data"], list) + pagination = result["pagination"] + required_pagination_keys = {"page_num", "page_offset", "page_size", "min_pages_left", "total", "has_more"} + assert required_pagination_keys <= pagination.keys() + for dataset in result["data"]: + required_dataset_keys = {"id", "name", "count", "created"} + assert required_dataset_keys <= dataset.keys() + assert isinstance(dataset["id"], int) + assert isinstance(dataset["name"], str) + assert isinstance(dataset["count"], int) + + def test_search_sorting(self, dataset_client): + result = dataset_client.search(sorting=[{"direction": "desc", "field": "name"}]) + names = [d["name"] for d in result["data"]] + assert names == sorted(names, reverse=True) + + def test_search_pagination(self, dataset_client): + result = dataset_client.search(page_num=1, page_size=15) + assert len(result["data"]) <= 15 + assert result["pagination"]["page_num"] == 1 + + def test_selection(self, dataset_client): + datasets = dataset_client.search()["data"] + assert datasets + dataset_id = datasets[0]["id"] + files_selected = dataset_client.search_files(dataset_id=dataset_id)["data"] + assert isinstance(files_selected, list) + for f in files_selected: + assert any(d["id"] == dataset_id for d in f.get("datasets", [])) + files_all = dataset_client.search_files()["data"] + assert isinstance(files_all, list) + 
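+        # An unfiltered file search may return files both with and without dataset bindings.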
has_dataset = any(f.get("datasets") for f in files_all) + has_no_dataset = any(not f.get("datasets") for f in files_all) + assert has_dataset or has_no_dataset + + def test_create_and_delete_dataset(self, dataset_client): + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + create_resp = dataset_client.create_dataset(name=dataset_name) + assert "detail" in create_resp + assert "successfully created" in create_resp["detail"].lower() + search_resp = dataset_client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}]) + assert any(d["name"] == dataset_name for d in search_resp["data"]) + delete_resp = dataset_client.delete_dataset(name=dataset_name) + assert "detail" in delete_resp + assert "successfully deleted" in delete_resp["detail"].lower() + search_after = dataset_client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}]) + assert all(d["name"] != dataset_name for d in search_after["data"]) + + @pytest.mark.skip(reason="Successfully creates dataset") + def test_create_dataset_with_empty_name(self, dataset_tracker): + created, client = dataset_tracker + + with pytest.raises(HTTPError) as e: + client.create_dataset(name="") + + assert e.value.status_code in (400, 422) + + def test_create_duplicate_dataset(self, dataset_tracker): + created, client = dataset_tracker + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + resp = client.create_dataset(name=dataset_name) + created.append(dataset_name) + assert "successfully created" in resp["detail"].lower() + with pytest.raises(HTTPError) as exc: + client.create_dataset(name=dataset_name) + assert exc.value.status_code == 400 + assert "already exists" in exc.value.body.lower() + + def test_search_existing_dataset(self, dataset_tracker): + created, client = dataset_tracker + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + resp = client.create_dataset(name=dataset_name) + created.append(dataset_name) + assert "successfully created" in resp["detail"].lower() + + search_resp = client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}]) + names = [d["name"] for d in search_resp["data"]] + assert dataset_name in names + + def test_search_non_existing_dataset(self, dataset_client): + search_resp = dataset_client.search( + filters=[{"field": "name", "operator": "eq", "value": "non_existing_dataset"}] + ) + assert search_resp["data"] == [] + + def test_search_multiple_existing_datasets(self, dataset_tracker): + created, client = dataset_tracker + names = [f"autotest_{uuid.uuid4().hex[:8]}" for _ in range(2)] + for n in names: + resp = client.create_dataset(name=n) + created.append(n) + assert "successfully created" in resp["detail"].lower() + + search_resp = client.search(filters=[{"field": "name", "operator": "in", "value": names}]) + found_names = {d["name"] for d in search_resp["data"]} + assert set(names) <= found_names + + +class TestDatasetsFrontend: + def test_delete_dataset(self, logged_in_page: Page, dataset_tracker): + page = logged_in_page + + created, client = dataset_tracker + + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + create_resp = client.create_dataset(name=dataset_name) + assert "detail" in create_resp + assert "successfully created" in create_resp["detail"].lower() + search_resp = client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}]) + assert any(d["name"] == dataset_name for d in search_resp["data"]) + + created.append(dataset_name) + + row = page.locator(f"div[role='none']:has-text('{dataset_name}')") + 
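+        # Locate the newly created dataset row in the UI before deleting it via its action button.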
expect(row).to_be_visible(timeout=10000) + + delete_button = row.locator("button", has=page.locator("svg")).last + delete_button.click(force=True) + + expect(row).not_to_be_visible(timeout=10000) + + search_after = client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}]) + assert all(d["name"] != dataset_name for d in search_after["data"]) + + @pytest.mark.parametrize( + "flow", + [ + {"after_cancel": "Discard"}, + {"after_cancel": "Save"}, + {"save": True, "after_cancel": None}, + ], + ) + def test_create_dataset_cancel(self, logged_in_page: Page, flow, dataset_tracker): + page = logged_in_page + created, client = dataset_tracker + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + items = page.locator("a[class^='document-card-view-item_card-item']") + expect(items.first).to_be_visible(timeout=10000) + + page.get_by_role("button", name="Add new dataset").click() + dataset_modal = page.locator("div[role='modal']", has_text="Add dataset") + dataset_modal.wait_for(state="visible", timeout=5000) + page.get_by_role("textbox", name="Name").fill(dataset_name) + + if flow.get("save"): + dataset_modal.get_by_role("button", name="Save").click(force=True) + created.append(dataset_name) + dataset_modal.wait_for(state="detached", timeout=5000) + return + + dataset_modal.get_by_role("button", name="Cancel").click() + + if flow.get("after_cancel"): + page.wait_for_selector( + "div[role='modal']:has-text('Your data may be lost. Do you want to save data?')", + state="visible", + timeout=5000, + ) + + confirm_modal = page.locator("div[role='modal']").filter( + has_text="Your data may be lost. Do you want to save data?" + ) + if flow["after_cancel"] == "Save": + created.append(dataset_name) + + confirm_modal.get_by_role("button", name=flow["after_cancel"]).click(force=True) + + confirm_modal.wait_for(state="detached", timeout=5000) + dataset_modal.wait_for(state="detached", timeout=5000) + + expect(page.get_by_role("textbox", name="Name")).not_to_be_visible(timeout=5000) + + def test_create_dataset_no_name(self, logged_in_page: Page): + page = logged_in_page + items = page.locator("a[class^='document-card-view-item_card-item']") + expect(items.first).to_be_visible(timeout=10000) + + page.get_by_role("button", name="Add new dataset").click() + dataset_modal = page.locator("div[role='modal']", has_text="Add dataset") + dataset_modal.wait_for(state="visible", timeout=5000) + + dataset_modal.get_by_role("button", name="Save").click(force=True) + error_message = dataset_modal.locator("div[role='alert'].uui-invalid-message") + expect(error_message).to_have_text("The field is mandatory", timeout=5000) + + def test_create_existing_dataset(self, logged_in_page: Page, dataset_tracker): + page = logged_in_page + created_datasets, client = dataset_tracker + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + resp = client.create_dataset(name=dataset_name) + created_datasets.append(dataset_name) + assert "successfully created" in resp["detail"].lower() + + document_items = page.locator("a[class^='document-card-view-item_card-item']") + expect(document_items.first).to_be_visible(timeout=10000) + + page.get_by_role("button", name="Add new dataset").click() + dataset_modal = page.locator("div[role='modal']", has_text="Add dataset") + dataset_modal.wait_for(state="visible", timeout=5000) + page.get_by_role("textbox", name="Name").fill(dataset_name) + dataset_modal.get_by_role("button", name="Save").click(force=True) + + expect(page.locator(f"text=Dataset {dataset_name} already 
exists!")).to_be_visible(timeout=30000) + + @pytest.mark.parametrize("select_all", [True, False]) + def test_add_to_dataset_empty_field(self, logged_in_page: Page, select_all: bool): + page = logged_in_page + + page.locator("rect").nth(0).click(force=True) + + if select_all: + page.locator("label:has-text('Select All') div").first.click(force=True) + else: + item = page.locator("a[class^='document-card-view-item_card-item']").first + item.scroll_into_view_if_needed() + + input_el = item.locator("input[type='checkbox']").first + label = item.locator("label.uui-checkbox-container") + uui_div = item.locator("div.uui-checkbox") + + click_target = label.first if label.count() else uui_div.first if uui_div.count() else input_el + click_target.click(force=True) + expect(input_el).to_be_checked() + + add_button = page.get_by_role("button", name="Add to dataset") + add_button.click() + + choose_button = page.get_by_role("button", name="Choose") + choose_button.click() + + error_label = page.locator("div.uui-invalid-message[role='alert']") + expect(error_label).to_have_text("The field is mandatory") diff --git a/test_automation_framework/tests/test_documents.py b/test_automation_framework/tests/test_documents.py new file mode 100644 index 000000000..da8284514 --- /dev/null +++ b/test_automation_framework/tests/test_documents.py @@ -0,0 +1,562 @@ +from logging import getLogger +from datetime import datetime + +import pytest +from playwright.sync_api import Page, expect +import uuid + + +from helpers.base_client.base_client import HTTPError + +logger = getLogger(__name__) + + +class Locators: + list_view_button = ("rect:nth-child(3)",) + icon_view_button = ("rect:nth-child(6)",) + + +class TestDocumentsAPI: + def test_upload_and_delete_file(self, file_tracker, tmp_path): + created_files, client = file_tracker + try: + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + assert file_info["status"] is True + assert "id" in file_info + assert "file_name" in file_info + search = client.search_files() + ids = [f["id"] for f in search["data"]] + assert file_info["id"] in ids + delete_result = client.delete_files([file_info["id"]]) + assert delete_result[0]["status"] is True + assert delete_result[0]["action"] == "delete" + search_after = client.search_files() + ids_after = [f["id"] for f in search_after["data"]] + assert file_info["id"] not in ids_after + created_files.clear() + finally: + if temp_file.exists(): + temp_file.unlink() + + @pytest.mark.skip(reason="Returns 500 instead of 4xx") + def test_upload_invalid_format(self, file_client, tmp_path): + invalid_file = tmp_path / f"{uuid.uuid4().hex}.py" + invalid_file.write_text("this is py file") + + with pytest.raises(HTTPError) as exc: + file_client.upload_file(str(invalid_file)) + + assert exc.value.status_code == 400 + + @pytest.mark.skip(reason="Uploads a file, but returns 500") + @pytest.mark.parametrize("content", ["", " "]) + def test_upload_empty_file(self, file_client, tmp_path, content): + empty_file = tmp_path / f"{uuid.uuid4().hex}_empty.pdf" + empty_file.write_text(content) + with pytest.raises(HTTPError) as exc: + file_client.upload_file(str(empty_file)) + assert exc.value.status_code == 400 + + def test_move_file(self, file_tracker, dataset_tracker, tmp_path): + created_datasets, dataset_client = dataset_tracker + + first_dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + second_dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + + first_resp = dataset_client.create_dataset(name=first_dataset_name) + 
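+        # Track the created dataset so it can be cleaned up after the test.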
created_datasets.append(first_dataset_name) + assert "successfully created" in first_resp["detail"].lower() + first_dataset_id = dataset_client.search( + filters=[{"field": "name", "operator": "eq", "value": first_dataset_name}] + )["data"][0]["id"] + + second_resp = dataset_client.create_dataset(name=second_dataset_name) + created_datasets.append(second_dataset_name) + assert "successfully created" in second_resp["detail"].lower() + second_dataset_id = dataset_client.search( + filters=[{"field": "name", "operator": "eq", "value": second_dataset_name}] + )["data"][0]["id"] + + created_files, client = file_tracker + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + file_id = file_info["id"] + try: + move1 = client.move_files(name=first_dataset_name, objects=[file_id])[0] + assert move1["status"] is True + assert "successfully bounded" in move1["message"].lower() + files_in_first = dataset_client.search_files(dataset_id=first_dataset_id)["data"] + assert any(f["id"] == file_id for f in files_in_first) + move2 = client.move_files(name=second_dataset_name, objects=[file_id])[0] + assert move2["status"] is True + assert "successfully bounded" in move2["message"].lower() + files_in_second = dataset_client.search_files(dataset_id=second_dataset_id)["data"] + assert any(f["id"] == file_id for f in files_in_second) + finally: + if temp_file.exists(): + temp_file.unlink() + + def test_add_file_to_dataset_twice(self, file_tracker, dataset_tracker, tmp_path): + created_datasets, dataset_client = dataset_tracker + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + dataset = dataset_client.create_dataset(name=dataset_name) + created_datasets.append(dataset_name) + assert "successfully created" in dataset["detail"].lower() + first_dataset_id = dataset_client.search(filters=[{"field": "name", "operator": "eq", "value": dataset_name}])[ + "data" + ][0]["id"] + + created_files, client = file_tracker + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + file_id = file_info["id"] + try: + move1 = client.move_files(name=dataset_name, objects=[file_id])[0] + assert move1["status"] is True + assert "successfully bounded" in move1["message"].lower() + files_in_first = dataset_client.search_files(dataset_id=first_dataset_id)["data"] + assert any(f["id"] == file_id for f in files_in_first) + move2 = client.move_files(name=dataset_name, objects=[file_id])[0] + assert move2["status"] is False + assert "already bounded" in move2["message"].lower() + finally: + if temp_file.exists(): + temp_file.unlink() + + def test_clear_search_files(self, file_tracker, tmp_path): + created_files, client = file_tracker + result = client.search_files() + assert "pagination" in result + assert "data" in result + assert isinstance(result["data"], list) + pagination = result["pagination"] + required_pagination_keys = {"page_num", "page_offset", "page_size", "min_pages_left", "total", "has_more"} + assert required_pagination_keys <= pagination.keys() + for file in result["data"]: + required_file_keys = { + "id", + "original_name", + "bucket", + "size_in_bytes", + "extension", + "original_ext", + "content_type", + "pages", + "last_modified", + "status", + "path", + "datasets", + } + assert required_file_keys <= file.keys() + assert isinstance(file["id"], int) + assert isinstance(file["original_name"], str) + assert isinstance(file["size_in_bytes"], int) + + def test_search_existing_file(self, file_tracker, tmp_path): + created_files, client = file_tracker + try: + file_info, 
temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + assert file_info["status"] is True + search_resp = client.search_files( + filters=[{"field": "original_name", "operator": "eq", "value": file_info["file_name"]}] + ) + names = [f["original_name"] for f in search_resp["data"]] + assert file_info["file_name"] in names + finally: + if temp_file.exists(): + temp_file.unlink() + + def test_search_non_existing_file(self, file_client): + search_resp = file_client.search_files( + filters=[{"field": "original_name", "operator": "eq", "value": "definitely_not_a_file.pdf"}] + ) + assert search_resp["data"] == [] + + def test_search_multiple_existing_files(self, file_tracker, tmp_path): + created_files, client = file_tracker + f1, t1 = client.upload_temp_file(client, file_tracker, tmp_path) + f2, t2 = client.upload_temp_file(client, file_tracker, tmp_path) + names = [f1["file_name"], f2["file_name"]] + + search = client.search_files(filters=[{"field": "original_name", "operator": "in", "value": names}]) + found_names = {f["original_name"] for f in search["data"]} + assert set(names) <= found_names + + t1.unlink(missing_ok=True) + t2.unlink(missing_ok=True) + + def test_download_existing_file(self, file_tracker, tmp_path): + created_files, client = file_tracker + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + file_id = file_info["id"] + + content = client.download_file(file_id) + assert isinstance(content, (bytes, bytearray)) + assert len(content) > 100 + assert content.startswith(b"%PDF") + + temp_file.unlink(missing_ok=True) + + def test_download_nonexistent_file(self, file_client): + with pytest.raises(HTTPError) as exc: + file_client.download_file(9999999) + assert exc.value.status_code == 404 + + @pytest.mark.parametrize("field", ["original_name", "last_modified", "size_in_bytes"]) + @pytest.mark.parametrize("direction", ["asc", "desc"]) + # name descending fails + def test_files_sorting(self, file_client, field, direction): + resp = file_client.post_json( + "/assets/files/search", + json={ + "pagination": {"page_num": 1, "page_size": 15}, + "filters": [{"field": "original_name", "operator": "ilike", "value": "%%"}], + "sorting": [{"direction": direction, "field": field}], + }, + headers=file_client._default_headers(content_type_json=True), + ) + + data = resp["data"] + values = [d[field] for d in data if field in d] + + if field == "last_modified": + values = [datetime.fromisoformat(v) for v in values] + + if field == "size_in_bytes": + values = [int(v) for v in values] + + expected = sorted(values, reverse=(direction == "desc")) + assert values == expected, f"{field} not sorted {direction}" + + +class TestDocumentsFrontend: + def test_documents_scroll(self, logged_in_page: Page): + page = logged_in_page + + last_doc = page.locator('a[class*="document-card-view-item_card-item"]').last + last_doc.scroll_into_view_if_needed() + expect(last_doc).to_be_visible() + + first_doc = page.locator('a[class*="document-card-view-item_card-item"]').first + first_doc.scroll_into_view_if_needed() + expect(first_doc).to_be_visible() + + def test_documents_pagination_by_page_number(self, logged_in_page: Page): + page = logged_in_page + + nav = page.locator('nav[role="navigation"]') + nav.wait_for(state="visible", timeout=10000) + list_selector = 'a[class*="document-card-view-item_card-item"]' + first_doc = page.locator(list_selector).first + expect(first_doc).to_be_visible(timeout=10000) + + old_text = first_doc.text_content() + + nav.get_by_role("button", 
name="2", exact=True).click() + + try: + expect(nav.get_by_role("button", name="2")).to_have_attribute("aria-current", "true", timeout=10000) + except AssertionError: + expect(page.locator(list_selector).first).not_to_have_text(old_text, timeout=10000) + + active_attr = nav.get_by_role("button", name="2").get_attribute("aria-current") + assert active_attr == "true" or page.locator(list_selector).first.text_content() != old_text + + def test_documents_pagination_by_arrows(self, logged_in_page: Page): + page = logged_in_page + + nav = page.locator('nav[role="navigation"]') + nav.wait_for(state="visible", timeout=10000) + list_selector = 'a[class*="document-card-view-item_card-item"]' + first_doc = page.locator(list_selector).first + expect(first_doc).to_be_visible(timeout=10000) + + old_text = first_doc.text_content() + + nav.locator("button").last.click() + try: + expect(nav.get_by_role("button", name="2", exact=True)).to_have_attribute( + "aria-current", "true", timeout=10000 + ) + except AssertionError: + expect(page.locator(list_selector).first).not_to_have_text(old_text, timeout=10000) + + old_text_back = page.locator(list_selector).first.text_content() + nav.locator("button").first.click() + try: + expect(nav.get_by_role("button", name="1", exact=True)).to_have_attribute( + "aria-current", "true", timeout=10000 + ) + except AssertionError: + expect(page.locator(list_selector).first).not_to_have_text(old_text_back, timeout=10000) + + active_attr_1 = nav.get_by_role("button", name="1", exact=True).get_attribute("aria-current") + assert active_attr_1 == "true" or page.locator(list_selector).first.text_content() != old_text_back + + def test_documents_show_on_page(self, logged_in_page: Page): + page = logged_in_page + + list_selector = 'a[class*="document-card-view-item_card-item"]' + cards = page.locator(list_selector) + + page_size_container = page.locator("div:has(> div > span:has-text('Show on page'))") + page_size_input = page_size_container.locator("input[aria-haspopup='true']") + + page_size_input.click() + options = page.locator("div[role='option']") + option_texts = [options.nth(i).inner_text() for i in range(options.count())] + page_size_input.click() + + for value in option_texts: + page_size_input.click() + + option = page.locator("div[role='option']", has_text=value).first + option.wait_for(state="visible", timeout=5000) + option.click() + + expect(cards.first).to_be_visible(timeout=10000) + count = cards.count() + assert count <= int(value), f"Expected at most {value} cards, got {count}" + + @pytest.mark.parametrize( + "flow", + [ + {"after_cancel": "Discard"}, + {"after_cancel": "Save"}, + {"choose_first": True, "after_cancel": None}, + ], + ) + def test_add_to_dataset_cancel(self, logged_in_page: Page, flow): + page = logged_in_page + first_card = page.locator("a[class*='document-card-view-item_card-item']").first + expect(first_card).to_be_visible(timeout=10000) + first_card.scroll_into_view_if_needed() + input_el = first_card.locator("input[type='checkbox']").first + label = first_card.locator("label.uui-checkbox-container") + uui_div = first_card.locator("div.uui-checkbox") + + click_target = label.first if label.count() else uui_div.first if uui_div.count() else input_el + click_target.click(force=True) + expect(input_el).to_be_checked(timeout=5000) + + page.get_by_role("button", name="Add to dataset").click() + dataset_input = page.get_by_role("textbox", name="Please select dataset") + dataset_input.click() + first_option = page.locator("div[role='option']").first + 
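+        # Pick the first dataset from the dropdown before exercising the cancel flow.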
first_option.wait_for(state="visible", timeout=5000) + first_option.click() + + if flow.get("choose_first"): + page.get_by_role("button", name="Choose").click() + page.get_by_role("button", name="Cancel").click() + + if flow.get("after_cancel"): + page.get_by_role("button", name=flow["after_cancel"]).click() + + expect(page.get_by_role("button", name="Choose")).not_to_be_visible(timeout=5000) + + def test_click_preprocess(self, logged_in_page: Page): + page = logged_in_page + first_card = page.locator("a[class*='document-card-view-item_card-item']").first + expect(first_card).to_be_visible(timeout=10000) + first_card.scroll_into_view_if_needed() + input_el = first_card.locator("input[type='checkbox']").first + label = first_card.locator("label.uui-checkbox-container") + uui_div = first_card.locator("div.uui-checkbox") + + click_target = label.first if label.count() else uui_div.first if uui_div.count() else input_el + click_target.click(force=True) + expect(input_el).to_be_checked(timeout=5000) + + preprocess_btn = page.get_by_role("button", name="Preprocess") + expect(preprocess_btn).to_be_visible(timeout=5000) + preprocess_btn.click() + # what are we checking? + + def test_add_to_extraction( + self, + logged_in_page: Page, + jobs_client, + ): + page = logged_in_page + + first_card = page.locator("a[class*='document-card-view-item_card-item']").first + expect(first_card).to_be_visible(timeout=10000) + first_card.scroll_into_view_if_needed() + + checkbox = first_card.locator("input[type='checkbox']").first + label = first_card.locator("label.uui-checkbox-container") + uui_div = first_card.locator("div.uui-checkbox") + + click_target = label.first if label.count() else uui_div.first if uui_div.count() else checkbox + click_target.click(force=True) + expect(checkbox).to_be_checked(timeout=5000) + + page.get_by_role("button", name="Add to extraction").click() + + first_cell = page.get_by_role("cell").first + first_cell.locator("label div").click() + expect(first_cell.locator("input[type='checkbox']")).to_be_checked(timeout=5000) + + page.get_by_role("button", name="Next").click() + + job_name = f"extraction_job_{uuid.uuid4().hex[:8]}" + page.get_by_role("textbox", name="Job name").fill(job_name) + page.get_by_role("textbox", name="Select pipeline").click() + page.get_by_text("print", exact=True).click() + + page.get_by_role("button", name="Start Extraction").click() + + page.wait_for_url("**/jobs/**", timeout=20000) + jobs = jobs_client.search_jobs() + job_id = next((j["id"] for j in jobs["data"] if j["name"] == job_name), None) + assert job_id, f"Job with name {job_name} not found!" 
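+        # Poll the jobs API until the extraction job finishes, then verify the UI shows it as Finished.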
+ jobs_client.poll_until_finished(job_id, timeout_seconds=180) + page.reload() + expect(page.get_by_text("Finished")).to_be_visible(timeout=10000) + + @pytest.mark.parametrize("num_files", [1, 3]) + @pytest.mark.parametrize("view_mode", ["card", "list"]) + def test_delete_files(self, logged_in_page: Page, file_tracker, tmp_path, num_files, view_mode): + # list view fails because deleted lines does not disappear + page = logged_in_page + created_files, client = file_tracker + temp_files = [] + uploaded_files = [] + + for _ in range(num_files): + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + assert file_info["status"] is True + uploaded_files.append(file_info) + temp_files.append(temp_file) + + page.reload() + + if view_mode == "list": + page.locator("rect").nth(1).click(force=True) + rows_selector = "div.uui-table-row-container[role='row']" + items = page.locator(rows_selector) + else: + cards_selector = 'a[class*="document-card-view-item_card-item"]' + items = page.locator(cards_selector) + + expect(items.first).to_be_visible(timeout=10000) + + selected_names = [] + for f in uploaded_files: + name = f["file_name"] + element = items.filter(has_text=name).first + expect(element).to_be_visible(timeout=10000) + + checkbox = element.locator("input[type='checkbox']").first + label = element.locator("label.uui-checkbox-container") + uui_div = element.locator("div.uui-checkbox") + click_target = label.first if label.count() else uui_div.first if uui_div.count() else checkbox + + element.scroll_into_view_if_needed() + click_target.click(force=True) + expect(checkbox).to_be_checked(timeout=5000) + + selected_names.append(name) + + delete_button = page.get_by_role("button", name="Delete") + delete_button.click(force=True) + + for name in selected_names: + expect(page.get_by_text(name)).not_to_be_visible(timeout=10000) + + remaining = client.search_files()["data"] + remaining_ids = {f["id"] for f in remaining} + for f in uploaded_files: + assert f["id"] not in remaining_ids, f"File {f['file_name']} was not deleted" + for f in created_files: + if f not in uploaded_files: + assert f["id"] in remaining_ids, f"Unrelated file {f['file_name']} was deleted" + + for temp in temp_files: + if temp.exists(): + temp.unlink() + + @pytest.mark.parametrize("rect_index", [0, 1]) + def test_select_all_unselect_all_both_views(self, logged_in_page, rect_index): + page = logged_in_page + page.locator("rect").nth(rect_index).click(force=True) + select_all = page.locator("label:has-text('Select All') div").first + select_all.click(force=True) + file_inputs = page.locator("div.uui-checkbox > input[type='checkbox']") + for i in range(file_inputs.count()): + expect(file_inputs.nth(i)).to_be_checked() + select_all = page.locator("label:has-text('selected') div").first + select_all.click(force=True) + file_inputs = page.locator("div.uui-checkbox > input[type='checkbox']") + for i in range(file_inputs.count()): + expect(file_inputs.nth(i)).not_to_be_checked() + + @pytest.mark.parametrize( + "rect_index, file_locator", + [ + (0, "a[class^='document-card-view-item_card-item']"), + (1, "div[role='cell']"), + ], + ) + def test_view_switch(self, logged_in_page: Page, rect_index: int, file_locator: str): + page = logged_in_page + page.locator("rect").nth(rect_index).click(force=True) + expect(page.locator(file_locator).first).to_be_visible() + + @pytest.mark.parametrize("action", ["select", "unselect"]) + def test_select_unselect_one_by_one_icon_view(self, logged_in_page: Page, action: str): + page = 
logged_in_page + + page.locator("rect").nth(0).click(force=True) + + items = page.locator("a[class^='document-card-view-item_card-item']") + expect(items.first).to_be_visible(timeout=10000) + + inputs = items.locator("input[type='checkbox']") + + if action == "unselect": + page.locator("label:has-text('Select All') div").first.click(force=True) + expect(inputs.first).to_be_checked(timeout=5000) + + count = inputs.count() + for i in range(count): + row = items.nth(i) + row.scroll_into_view_if_needed() + + input_el = row.locator("input[type='checkbox']").first + label = row.locator("label.uui-checkbox-container") + uui_div = row.locator("div.uui-checkbox") + + click_target = label.first if label.count() else uui_div.first if uui_div.count() else input_el + click_target.click(force=True) + + if action == "select": + expect(input_el).to_be_checked() + else: + expect(input_el).not_to_be_checked() + + @pytest.mark.parametrize("action", ["select", "unselect"]) + def test_select_unselect_one_by_one_list_view(self, logged_in_page: Page, action: str): + page = logged_in_page + page.locator("rect").nth(1).click(force=True) + + rows = page.locator("div.uui-table-row-container[role='row']") + expect(rows.first).to_be_visible(timeout=5000) + + count = rows.count() + assert count > 0, "no list rows found" + + if action == "unselect": + page.locator("label:has-text('Select All') div").first.click(force=True) + expect(page.locator("div.uui-checkbox > input[type='checkbox']").first).to_be_checked(timeout=5000) + + checkboxes = page.locator("div.uui-checkbox") + count = checkboxes.count() + for i in range(2, count): + cb = checkboxes.nth(i) + cb.scroll_into_view_if_needed() + cb.click(force=True) + + if action == "select": + expect(cb).to_be_checked() + else: + expect(cb).not_to_be_checked() diff --git a/test_automation_framework/tests/test_jobs.py b/test_automation_framework/tests/test_jobs.py new file mode 100644 index 000000000..e33f8de70 --- /dev/null +++ b/test_automation_framework/tests/test_jobs.py @@ -0,0 +1,451 @@ +from logging import getLogger +from datetime import datetime, timedelta +import uuid +from playwright.sync_api import Page, expect +from helpers.steps.jobs_creation import ( + run_new_job_documents_workflow, + run_new_job_first_line_workflow, + run_new_job_multi_tab_workflow, + run_new_job_dataset_without_documents_workflow, +) +from helpers.base_client.base_client import HTTPError + +import pytest +import re + + +logger = getLogger(__name__) + + +class TestJobs: + def test_create_and_poll_job( + self, file_client, jobs_client, file_tracker, dataset_tracker, job_tracker, tmp_path, user_uuid + ): + created_files, client = file_tracker + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + created_datasets, dataset_client = dataset_tracker + + dataset_name = f"autotest_ds_{uuid.uuid4().hex[:8]}" + dataset_client.create_dataset(name=dataset_name) + created_datasets.append(dataset_name) + move_resp = file_client.move_files(name=dataset_name, objects=[file_info["id"]])[0] + assert move_resp["status"] is True + job_name = f"test_job_{uuid.uuid4().hex[:8]}" + create_resp = jobs_client.create_job( + name=job_name, + file_ids=[file_info["id"]], + owners=[user_uuid], + ) + job_tracker[0].append(create_resp) + job_id = create_resp.get("id") + assert job_id + final_job = jobs_client.poll_until_finished(job_id=job_id, timeout_seconds=300) + status = final_job.get("status") + assert str(status).lower() in {"finished", "success", "completed"} + job_files = 
final_job.get("files") or [] + assert file_info["id"] in job_files + + @pytest.mark.parametrize("field", ["name", "type", "status", "deadline", "creation_datetime"]) + @pytest.mark.parametrize("direction", ["asc", "desc"]) + # descending name sorting works weird + def test_jobs_sorting(self, jobs_client, field, direction): + resp = jobs_client.post_json( + "/jobs/jobs/search", + json={ + "pagination": {"page_num": 1, "page_size": 15}, + "filters": [], + "sorting": [{"direction": direction, "field": field}], + }, + headers=jobs_client._default_headers(content_type_json=True), + ) + data = resp["data"] + values = [d[field] for d in data if field in d and d[field] is not None] + + if field in {"creation_datetime", "deadline"}: + values = [datetime.fromisoformat(v) for v in values] + + expected = sorted(values, reverse=(direction == "desc")) + assert values == expected + + @pytest.mark.parametrize("field", ["name", "type", "status", "deadline", "creation_datetime"]) + def test_job_search(self, jobs_client, job_tracker, file_tracker, dataset_tracker, user_uuid, tmp_path, field): + created_files, client = file_tracker + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + created_datasets, dataset_client = dataset_tracker + + dataset_name = f"autotest_ds_{uuid.uuid4().hex[:8]}" + dataset_client.create_dataset(name=dataset_name) + created_datasets.append(dataset_name) + + job_name = f"test_job_{uuid.uuid4().hex[:8]}" + create_resp = jobs_client.create_job( + name=job_name, + file_ids=[file_info["id"]], + owners=[user_uuid], + ) + job_id = create_resp.get("id") + jobs_client.poll_until_finished(job_id=job_id, timeout_seconds=300) + job_tracker[0].append(create_resp) + search_value = create_resp.get(field, None) + + filters = [ + {"field": field, "operator": "eq", "value": search_value}, + {"field": "name", "operator": "eq", "value": job_name}, + ] + + search_resp = jobs_client.post_json( + "/jobs/jobs/search", + json={ + "pagination": {"page_num": 1, "page_size": 100}, + "filters": filters, + }, + headers=jobs_client._default_headers(content_type_json=True), + ) + + job_ids = [j["id"] for j in search_resp["data"]] + assert job_id in job_ids + + @pytest.mark.parametrize("field", ["creation_datetime", "deadline"]) + def test_jobs_date_range_filter(self, jobs_client, field): + start = (datetime.utcnow() - timedelta(days=365)).replace(microsecond=0).isoformat() + end = (datetime.utcnow() + timedelta(days=365)).replace(microsecond=0).isoformat() + + resp = jobs_client.post_json( + "/jobs/jobs/search", + json={ + "pagination": {"page_num": 1, "page_size": 15}, + "filters": [ + {"field": field, "operator": "ge", "value": start}, + {"field": field, "operator": "le", "value": end}, + ], + }, + headers=jobs_client._default_headers(content_type_json=True), + ) + + data = resp["data"] + for job in data: + if field in job and job[field] is not None: + date_val = datetime.fromisoformat(job[field]) + assert datetime.fromisoformat(start) <= date_val <= datetime.fromisoformat(end) + + def test_create_job_with_existing_name( + self, file_client, jobs_client, file_tracker, dataset_tracker, job_tracker, tmp_path, user_uuid + ): + created_files, client = file_tracker + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + created_datasets, dataset_client = dataset_tracker + + dataset_name = f"autotest_ds_{uuid.uuid4().hex[:8]}" + dataset_client.create_dataset(name=dataset_name) + created_datasets.append(dataset_name) + move_resp = 
file_client.move_files(name=dataset_name, objects=[file_info["id"]])[0] + assert move_resp["status"] is True + job_name = f"test_job_{uuid.uuid4().hex[:8]}" + create_resp_first = jobs_client.create_job( + name=job_name, + file_ids=[file_info["id"]], + owners=[user_uuid], + ) + job_tracker[0].append(create_resp_first) + job_id = create_resp_first.get("id") + assert job_id + + with pytest.raises(HTTPError) as exc: + jobs_client.create_job( + name=job_name, + file_ids=[file_info["id"]], + owners=[user_uuid], + ) + assert exc.value.status_code == 400 + + +class TestJobsFrontend: + def test_jobs_scroll(self, jobs_page: Page): + page = jobs_page + + page_size_container = page.locator("div:has(> div > span:has-text('Show on page'))") + page_size_input = page_size_container.locator("input[aria-haspopup='true']") + page_size_input.click() + page.locator("div[role='option']", has_text="100").click() + + rows = page.locator("div[role='row']").filter(has_not=page.locator("div[role='columnheader']")) + + last_row = rows.last + last_row.scroll_into_view_if_needed() + expect(last_row).to_be_visible() + + first_row = rows.first + first_row.scroll_into_view_if_needed() + expect(first_row).to_be_visible() + + def test_jobs_pagination_by_page_number(self, jobs_page: Page): + page = jobs_page + nav = page.locator('nav[role="navigation"]') + nav.wait_for(state="visible", timeout=10000) + + rows = page.locator("div[role='row']").filter(has_not=page.locator("div[role='columnheader']")) + first_row = rows.first + expect(first_row).to_be_visible(timeout=10000) + + old_text = first_row.text_content() + + nav.get_by_role("button", name="2", exact=True).click() + + try: + expect(nav.get_by_role("button", name="2")).to_have_attribute("aria-current", "true", timeout=10000) + except AssertionError: + expect(rows.first).not_to_have_text(old_text, timeout=10000) + + active_attr = nav.get_by_role("button", name="2").get_attribute("aria-current") + assert active_attr == "true" or rows.first.text_content() != old_text + + def test_jobs_pagination_by_arrows(self, jobs_page: Page): + page = jobs_page + + nav = page.locator('nav[role="navigation"]') + nav.wait_for(state="visible", timeout=10000) + + rows = page.locator("div[role='row']").filter(has_not=page.locator("div[role='columnheader']")) + first_row = rows.first + expect(first_row).to_be_visible(timeout=10000) + + old_text = first_row.text_content() + + nav.locator("button").last.click() + try: + expect(nav.get_by_role("button", name="2", exact=True)).to_have_attribute( + "aria-current", "true", timeout=10000 + ) + except AssertionError: + expect(rows.first).not_to_have_text(old_text, timeout=10000) + + old_text_back = rows.first.text_content() + nav.locator("button").first.click() + try: + expect(nav.get_by_role("button", name="1", exact=True)).to_have_attribute( + "aria-current", "true", timeout=10000 + ) + except AssertionError: + expect(rows.first).not_to_have_text(old_text_back, timeout=10000) + + active_attr_1 = nav.get_by_role("button", name="1", exact=True).get_attribute("aria-current") + assert active_attr_1 == "true" or rows.first.text_content() != old_text_back + + def test_jobs_show_on_page(self, jobs_page: Page): + page = jobs_page + + rows = page.locator("div[role='row']").locator("xpath=..").locator("div[role='row']:not(.uui-table-header-row)") + + page_size_container = page.locator("div:has(> div > span:has-text('Show on page'))") + page_size_input = page_size_container.locator("input[aria-haspopup='true']") + + page_size_input.click() + options = 
page.locator("div[role='option']") + option_texts = [options.nth(i).inner_text() for i in range(options.count())] + page_size_input.click() + + for value in option_texts: + page_size_input.click() + + option = page.locator("div[role='option']", has_text=value).first + option.wait_for(state="visible", timeout=5000) + option.click() + + expect(rows.first).to_be_visible(timeout=10000) + count = rows.count() + assert count <= int(value), f"Expected at most {value} rows, got {count}" + + @pytest.mark.parametrize("num_files", [1, 3]) + @pytest.mark.parametrize("manager", ["Airflow", "Databricks", "Other"]) + def test_create_job_documents_tab( + self, jobs_page: Page, file_tracker, tmp_path, jobs_client, file_client, dataset_tracker, num_files, manager + ): + page = jobs_page + run_new_job_documents_workflow( + page=page, + num_files=num_files, + file_tracker=file_tracker, + dataset_tracker=dataset_tracker, + jobs_client=jobs_client, + tmp_path=tmp_path, + pipeline_manager=manager, + ) + + @pytest.mark.parametrize("manager", ["Airflow", "Databricks"]) + @pytest.mark.parametrize("tab", ["Jobs", "Datasets", "Revisions"]) + def test_create_job_other_tabs( + self, jobs_page: Page, file_tracker, tmp_path, jobs_client, file_client, tab, manager, dataset_tracker + ): + page = jobs_page + run_new_job_first_line_workflow( + page=page, + num_files=1, + file_tracker=file_tracker, + dataset_tracker=dataset_tracker, + jobs_client=jobs_client, + tmp_path=tmp_path, + pipeline_manager=manager, + tab_button=tab, + ) + + @pytest.mark.parametrize("manager", ["Airflow", "Databricks"]) + @pytest.mark.parametrize( + "tabs", + [ + ["Documents", "Jobs"], + ["Documents", "Datasets"], + ["Documents", "Revisions"], + ["Documents", "Jobs", "Datasets", "Revisions"], + ], + ) + def test_create_job_multi_tabs( + self, jobs_page: Page, file_tracker, tmp_path, jobs_client, dataset_tracker, manager, tabs + ): + page = jobs_page + run_new_job_multi_tab_workflow( + page=page, + jobs_client=jobs_client, + tabs=tabs, + file_tracker=file_tracker, + dataset_tracker=dataset_tracker, + tmp_path=tmp_path, + pipeline_manager=manager, + ) + + def test_create_job_zero_dataset( + self, + jobs_page: Page, + file_tracker, + tmp_path, + jobs_client, + dataset_tracker, + ): + # outcome? 
+ page = jobs_page + run_new_job_dataset_without_documents_workflow( + page=page, + jobs_client=jobs_client, + dataset_tracker=dataset_tracker, + ) + + def test_create_job_without_name( + self, + jobs_page: Page, + ): + page = jobs_page + logger.info("Open wizard") + page.get_by_role("button", name="New job").click() + page.get_by_role("button", name="Next").click() + page.get_by_role("button", name="New Job").click() + error_label = page.locator("div[role='alert'].uui-invalid-message").nth(0) + expect(error_label).to_have_text("The field is mandatory", timeout=5000) + + def test_create_job_save_draft( + self, jobs_page: Page, file_tracker, tmp_path, jobs_client, file_client, dataset_tracker + ): + page = jobs_page + run_new_job_first_line_workflow( + page=page, + num_files=1, + file_tracker=file_tracker, + dataset_tracker=dataset_tracker, + jobs_client=jobs_client, + tmp_path=tmp_path, + tab_button="Jobs", + save_as_draft=True, + ) + + @pytest.mark.parametrize( + "validation_type", ["Cross validation", "Extensive validation", "Hierarchical validation", "Validation only"] + ) + def test_create_job_human_in_the_loop( + self, jobs_page: Page, file_tracker, tmp_path, jobs_client, dataset_tracker, validation_type + ): + page = jobs_page + run_new_job_documents_workflow( + page=page, + num_files=1, + jobs_client=jobs_client, + file_tracker=file_tracker, + dataset_tracker=dataset_tracker, + tmp_path=tmp_path, + human_in_loop=True, + validation_type=validation_type, + ) + + def test_create_job_human_in_the_loop_distribute( + self, jobs_page: Page, file_tracker, tmp_path, jobs_client, dataset_tracker + ): + page = jobs_page + run_new_job_documents_workflow( + page=page, + num_files=1, + jobs_client=jobs_client, + file_tracker=file_tracker, + dataset_tracker=dataset_tracker, + tmp_path=tmp_path, + human_in_loop=True, + distribute_tasks=True, + ) + + def test_open_any_job_from_table(self, jobs_page: Page): + page = jobs_page + rows = page.locator("div[role='row']").filter(has_not=page.locator("div[role='columnheader']")) + expect(rows.first).to_be_visible(timeout=10000) + first_job = rows.first.locator("div").nth(1) + job_name = first_job.text_content().strip() + first_job.click() + expect(page).to_have_url(re.compile(r".*/jobs.*"), timeout=10000) + expect(page.get_by_text(job_name)).to_be_visible(timeout=10000) + + def test_open_job_panel_load_bar(self, jobs_page: Page): + page = jobs_page + + rows = page.locator("div[role='row']").filter(has_not=page.locator("div[role='columnheader']")) + expect(rows.first).to_be_visible(timeout=10000) + rows.first.click() + + sidebar = page.locator("div[class*='job-page_job-page-sidebar-content']") + expect(sidebar).to_be_visible(timeout=10000) + + progress_text = sidebar.locator("p[class*='job-sidebar-header_progressBarText']") + expect(progress_text).to_be_visible(timeout=10000) + + progress_bar = sidebar.locator("div[class*='job-sidebar-header_bar']") + count = progress_bar.count() + assert count > 0, "Progress bar element not found in DOM" + + def test_open_job_panel_hide_unhide(self, jobs_page: Page): + page = jobs_page + + rows = page.locator("div[role='row']").filter(has_not=page.locator("div[role='columnheader']")) + expect(rows.first).to_be_visible(timeout=10000) + rows.first.click() + + sidebar = page.locator("div[class*='job-page_job-page-sidebar-content']") + expect(sidebar).to_be_visible(timeout=10000) + + panel_title = sidebar.locator("h2") + expect(panel_title).to_have_text("Automatic", timeout=10000) + + panel_wrapper = 
sidebar.locator("div[class*='jod-detailed-sidebar-connector_sidebar-panel-wrapper']") + toggle_button = sidebar.locator("button[class*='jod-detailed-sidebar-connector_close-icon']") + expect(toggle_button).to_be_visible(timeout=5000) + + initial_classes = panel_wrapper.first.get_attribute("class") or "" + assert "sidebar-panel-opened" in initial_classes, f"Expected opened class, got: {initial_classes}" + + toggle_button.click() + + page.wait_for_timeout(300) + closed_classes = panel_wrapper.first.get_attribute("class") or "" + assert "sidebar-panel-closed" in closed_classes, f"Expected closed class, got: {closed_classes}" + + open_icon_button = sidebar.locator("button[class*='jod-detailed-sidebar-connector_open-icon']") + expect(open_icon_button).to_be_visible(timeout=5000) + + open_icon_button.click() + + page.wait_for_timeout(300) + reopened_classes = panel_wrapper.first.get_attribute("class") or "" + assert "sidebar-panel-opened" in reopened_classes, f"Expected reopened class, got: {reopened_classes}" diff --git a/test_automation_framework/tests/test_other.py b/test_automation_framework/tests/test_other.py new file mode 100644 index 000000000..396183370 --- /dev/null +++ b/test_automation_framework/tests/test_other.py @@ -0,0 +1,24 @@ +from logging import getLogger + +logger = getLogger(__name__) + + +class TestMenu: + def test_menu(self, menu_client): + menu = menu_client.get_menu() + assert isinstance(menu, list) + assert menu + required_keys = {"name", "badgerdoc_path", "is_external", "is_iframe", "url", "children"} + for item in menu: + assert required_keys <= item.keys() + first_item = menu[0] + assert isinstance(first_item["name"], str) + assert isinstance(first_item["badgerdoc_path"], str) + assert isinstance(first_item["is_external"], bool) + assert isinstance(first_item["children"], (list, type(None))) + expected_names = {"Documents", "My Tasks", "Jobs", "Settings"} + actual_names = {item["name"] for item in menu} + assert expected_names <= actual_names + settings_item = next(i for i in menu if i["name"] == "Settings") + assert isinstance(settings_item["children"], list) + assert any(child["name"] == "Keycloak" for child in settings_item["children"]) diff --git a/test_automation_framework/tests/test_plugins.py b/test_automation_framework/tests/test_plugins.py new file mode 100644 index 000000000..09cba3b3b --- /dev/null +++ b/test_automation_framework/tests/test_plugins.py @@ -0,0 +1,177 @@ +from logging import getLogger +import uuid +from playwright.sync_api import expect +import pytest + + +logger = getLogger(__name__) + + +class TestPlugins: + @pytest.mark.parametrize("iframe", [True, False]) + def test_create_and_delete_plugin(self, plugins_tracker, iframe): + created, plugins_client = plugins_tracker + unique_name = f"plugin_{uuid.uuid4().hex[:8]}" + resp = plugins_client.create_plugin( + name=unique_name, + menu_name=unique_name, + description="bar", + version="1", + url="http://what.com/what", + is_iframe=iframe, + ) + plugin_id = resp.id + created.append(plugin_id) + + plugins = plugins_client.get_plugins() + assert any(p.id == plugin_id for p in plugins) + assert any(p.name == unique_name for p in plugins) + + plugins_client.delete_plugin(plugin_id) + + plugins = plugins_client.get_plugins() + assert not any(p.id == plugin_id for p in plugins) + + def test_update_plugin(self, plugins_tracker): + created, plugins_client = plugins_tracker + unique_name = f"plugin_{uuid.uuid4().hex[:8]}" + resp = plugins_client.create_plugin( + name=unique_name, + menu_name=unique_name, + 
description="bar", + version="1", + url="http://what.com/what", + is_iframe=True, + ) + plugin_id = resp.id + created.append(plugin_id) + + updated_payload = { + "name": unique_name, + "menu_name": unique_name, + "description": "updated desc", + "version": "1", + "url": "http://what.com/what", + "is_iframe": True, + } + update_resp = plugins_client.update_plugin(plugin_id, **updated_payload) + assert update_resp.description == "updated desc" + + plugins = plugins_client.get_plugins() + updated = next(p for p in plugins if p.id == plugin_id) + assert updated.description == "updated desc" + + def test_view_plugins_from_settings(self, plugins_page, plugins_tracker): + page = plugins_page + created, plugins_client = plugins_tracker + plugins = plugins_client.get_plugins() + row_cells = page.locator("div[role='row'] div[role='cell']:first-child div div") + frontend_names = [row_cells.nth(i).inner_text().strip() for i in range(row_cells.count())] + api_names = [p.menu_name.strip() for p in plugins] + assert set(frontend_names) == set( + api_names + ), f"Frontend plugins {frontend_names} do not match API plugins {api_names}" + + def test_sort_plugins_by_name(self, plugins_page, plugins_tracker): + page = plugins_page + row_cells = page.locator("div[role='row'] div[role='cell']:first-child div div") + + def get_frontend_names(): + return [row_cells.nth(i).inner_text().strip() for i in range(row_cells.count())] + + initial_names = get_frontend_names() + assert initial_names, "No plugins loaded in frontend table" + + name_header = page.locator("div[role='columnheader'] >> text=Name") + expect(name_header).to_be_visible() + name_header.click() + + asc_names = get_frontend_names() + assert asc_names == sorted( + asc_names, key=lambda x: x.lower() + ), f"Plugins not sorted ascending by name: {asc_names}" + + name_header.click() + desc_names = get_frontend_names() + assert desc_names == sorted( + desc_names, key=lambda x: x.lower(), reverse=True + ), f"Plugins not sorted descending by name: {desc_names}" + + @pytest.mark.parametrize("delete_action", ["confirm", "cancel"]) + @pytest.mark.parametrize("iframe", [False, True]) + def test_create_and_delete_plugin_via_ui(self, plugins_page, plugins_tracker, delete_action, iframe): + created, plugins_client = plugins_tracker + page = plugins_page + + plugin_name = f"plugin_{uuid.uuid4().hex[:6]}" + menu_name = f"menu_{uuid.uuid4().hex[:6]}" + description = "test plugin description" + version = "1.0" + url = "http://what.com/what" + + page.get_by_role("button", name="Add Plugin").click() + page.get_by_role("textbox").nth(0).fill(plugin_name) + page.get_by_role("textbox").nth(1).fill(menu_name) + page.get_by_role("textbox").nth(2).fill(description) + page.get_by_role("textbox").nth(3).fill(version) + page.get_by_role("textbox", name="http://example.com/plugin").fill(url) + + if not iframe: + page.locator("label", has_text="Is Iframe Plugin?").locator("div").nth(1).click() + + page.get_by_role("button", name="Save").click() + + row = page.get_by_role("row", name=menu_name) + expect(row).to_be_visible(timeout=30000) + + plugins = plugins_client.get_plugins() + plugin = next((p for p in plugins if p.name == plugin_name), None) + created.append(plugin.id) + assert plugin, f"Plugin {plugin_name} not found in API" + assert plugin.is_iframe == iframe + + row.get_by_role("button").click() + page.get_by_role("button", name=delete_action.capitalize()).click() + + if delete_action == "confirm": + expect(page.get_by_role("row", 
name=menu_name)).not_to_be_visible(timeout=30000) + plugins = plugins_client.get_plugins() + assert all(plugin.id != p.id for p in plugins) + else: + expect(row).to_be_visible(timeout=30000) + plugins = plugins_client.get_plugins() + assert any(p.id == plugin.id for p in plugins) + + @pytest.mark.parametrize("missing_field", ["plugin_name", "menu_name", "version", "url"]) + def test_validate_mandatory_fields(self, plugins_page, missing_field, plugins_tracker): + created, plugins_client = plugins_tracker + page = plugins_page + + page.get_by_role("button", name="Add Plugin").click() + + plugin_name = f"plugin_{uuid.uuid4().hex[:6]}" if missing_field != "plugin_name" else "" + menu_name = f"menu_{uuid.uuid4().hex[:6]}" + version = "1.0" + url = "http://what.com/what" + + if missing_field != "plugin_name": + page.get_by_role("textbox").nth(0).fill(plugin_name) + if missing_field != "menu_name": + page.get_by_role("textbox").nth(1).fill(menu_name) + if missing_field != "version": + page.get_by_role("textbox").nth(3).fill(version) + if missing_field != "url": + page.get_by_role("textbox", name="http://example.com/plugin").fill(url) + + page.get_by_role("button", name="Save").click() + + if missing_field == "url": + expect(page.get_by_text("Please enter a valid URL starting with http://")).to_be_visible() + return + + plugins = plugins_client.get_plugins() + plugin = next((p for p in plugins if p.name == plugin_name), None) + assert plugin, f"Plugin {plugin_name} was not created (unexpected)" + created.append(plugin.id) + + pytest.fail(f"Validation missing for {missing_field}, plugin {plugin.id} was created") diff --git a/test_automation_framework/tests/test_reports.py b/test_automation_framework/tests/test_reports.py new file mode 100644 index 000000000..fc51f96cd --- /dev/null +++ b/test_automation_framework/tests/test_reports.py @@ -0,0 +1,36 @@ +from logging import getLogger + +import pytest + + +from helpers.base_client.base_client import HTTPError + +logger = getLogger(__name__) + + +class TestReports: + def test_export_tasks_csv(self, reports_client, user_uuid): + csv_text = reports_client.export_tasks( + user_ids=[user_uuid], + date_from="2025-05-01 00:00:00", + date_to="2025-08-31 00:00:00", + ) + assert "annotator_id" in csv_text + assert "task_id" in csv_text + + @pytest.mark.parametrize( + "date_from,date_to", + [ + ("2028-05-01 00:00:00", "2028-08-31 00:00:00"), + ("1900-01-01 00:00:00", "1900-12-31 00:00:00"), + ("2025-09-01 00:00:00", "2025-08-01 00:00:00"), + ], + ) + def test_export_tasks_wrong_date(self, reports_client, user_uuid, date_from, date_to): + with pytest.raises(HTTPError) as exc: + reports_client.export_tasks( + user_ids=[user_uuid], + date_from=date_from, + date_to=date_to, + ) + assert exc.value.status_code == 406 diff --git a/test_automation_framework/tests/test_tasks.py b/test_automation_framework/tests/test_tasks.py new file mode 100644 index 000000000..c88b014a0 --- /dev/null +++ b/test_automation_framework/tests/test_tasks.py @@ -0,0 +1,110 @@ +from logging import getLogger +from playwright.sync_api import Page, expect + + +logger = getLogger(__name__) + + +class TestTasksFrontend: + def test_tasks_scroll(self, tasks_page: Page): + page = tasks_page + + page_size_container = page.locator("div:has(> div > span:has-text('Show on page'))") + page_size_input = page_size_container.locator("input[aria-haspopup='true']") + page_size_input.click() + page.locator("div[role='option']", has_text="100").click() + + rows = 
page.locator("div[role='row']").filter(has_not=page.locator("div[role='columnheader']")) + + last_row = rows.last + last_row.scroll_into_view_if_needed() + expect(last_row).to_be_visible() + + first_row = rows.first + first_row.scroll_into_view_if_needed() + expect(first_row).to_be_visible() + + def test_tasks_pagination_by_page_number(self, tasks_page: Page): + page = tasks_page + nav = page.locator('nav[role="navigation"]') + nav.wait_for(state="visible", timeout=10000) + + rows = page.locator("div[role='row']").filter(has_not=page.locator("div[role='columnheader']")) + first_row = rows.first + expect(first_row).to_be_visible(timeout=10000) + + old_text = first_row.text_content() + + nav.get_by_role("button", name="2", exact=True).click() + + try: + expect(nav.get_by_role("button", name="2")).to_have_attribute("aria-current", "true", timeout=10000) + except AssertionError: + expect(rows.first).not_to_have_text(old_text, timeout=10000) + + active_attr = nav.get_by_role("button", name="2").get_attribute("aria-current") + assert active_attr == "true" or rows.first.text_content() != old_text + + def test_tasks_pagination_by_arrows(self, tasks_page: Page): + page = tasks_page + + nav = page.locator('nav[role="navigation"]') + nav.wait_for(state="visible", timeout=10000) + + rows = page.locator("div[role='row']").filter(has_not=page.locator("div[role='columnheader']")) + first_row = rows.first + expect(first_row).to_be_visible(timeout=10000) + + old_text = first_row.text_content() + + nav.locator("button").last.click() + try: + expect(nav.get_by_role("button", name="2", exact=True)).to_have_attribute( + "aria-current", "true", timeout=10000 + ) + except AssertionError: + expect(rows.first).not_to_have_text(old_text, timeout=10000) + + old_text_back = rows.first.text_content() + nav.locator("button").first.click() + try: + expect(nav.get_by_role("button", name="1", exact=True)).to_have_attribute( + "aria-current", "true", timeout=10000 + ) + except AssertionError: + expect(rows.first).not_to_have_text(old_text_back, timeout=10000) + + active_attr_1 = nav.get_by_role("button", name="1", exact=True).get_attribute("aria-current") + assert active_attr_1 == "true" or rows.first.text_content() != old_text_back + + def test_tasks_show_on_page(self, tasks_page: Page): + page = tasks_page + + rows = page.locator("div[role='row']").locator("xpath=..").locator("div[role='row']:not(.uui-table-header-row)") + + page_size_container = page.locator("div:has(> div > span:has-text('Show on page'))") + page_size_input = page_size_container.locator("input[aria-haspopup='true']") + + page_size_input.click() + options = page.locator("div[role='option']") + option_texts = [options.nth(i).inner_text() for i in range(options.count())] + page_size_input.click() + + for value in option_texts: + page_size_input.click() + + option = page.locator("div[role='option']", has_text=value).first + option.wait_for(state="visible", timeout=5000) + option.click() + expect(rows.first).to_be_visible(timeout=10000) + page.wait_for_timeout(1000) + page.wait_for_function( + """(expected) => { + const rows = document.querySelectorAll("div[role='row']:not(.uui-table-header-row)"); + return rows.length <= expected; + }""", + arg=int(value), + timeout=5000, + ) + count = rows.count() + assert count <= int(value), f"Expected at most {value} rows, got {count}" diff --git a/test_automation_framework/tests/test_upload_wizard.py b/test_automation_framework/tests/test_upload_wizard.py new file mode 100644 index 000000000..5cea3aab9 --- /dev/null 
+++ b/test_automation_framework/tests/test_upload_wizard.py @@ -0,0 +1,275 @@ +import uuid +import pytest +from playwright.sync_api import Page, expect +from helpers.files.file_client_frontend import FrontendFileHelper +from logging import getLogger +from pathlib import Path +from helpers.steps.jobs_creation import run_upload_workflow + +logger = getLogger(__name__) + + +class TestUploadWizard: + @pytest.mark.parametrize("num_files", [1, 3]) + @pytest.mark.parametrize("manager", [None, "Airflow", "Databricks"]) + def test_upload_documents_without_dataset( + self, logged_in_page: Page, file_tracker, tmp_path, jobs_client, file_client, num_files, manager + ): + page = logged_in_page + created_files, client = file_tracker + frontend_file_helper = FrontendFileHelper() + run_upload_workflow( + page, + frontend_file_helper, + num_files, + file_tracker, + client, + jobs_client, + pipeline_manager=manager, + dataset_type="none", + tmp_path=tmp_path, + ) + + @pytest.mark.parametrize("num_files", [1, 3]) + def test_upload_documents_existing_dataset( + self, logged_in_page: Page, file_tracker, tmp_path, jobs_client, file_client, dataset_tracker, num_files + ): + page = logged_in_page + created_files, client = file_tracker + created_datasets, dataset_client = dataset_tracker + + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + first_resp = dataset_client.create_dataset(name=dataset_name) + created_datasets.append(dataset_name) + assert "successfully created" in first_resp["detail"].lower() + + frontend_file_helper = FrontendFileHelper() + run_upload_workflow( + page, + frontend_file_helper, + num_files, + file_tracker, + client, + jobs_client, + dataset_type="existing", + dataset_name=dataset_name, + tmp_path=tmp_path, + ) + + @pytest.mark.parametrize("num_files", [1, 3]) + def test_upload_documents_existing_dataset_new_name( + self, logged_in_page: Page, file_tracker, tmp_path, jobs_client, file_client, dataset_tracker, num_files + ): + # should we see an error here? 
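+ # NOTE (assumption): this flow re-uses the name of a dataset that already exists; if the
+ # wizard is expected to reject duplicate dataset names, an explicit check could be added
+ # once the expected validation text is confirmed, e.g.:
+ #     expect(page.get_by_text("already exists")).to_be_visible(timeout=5000)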
+ page = logged_in_page + created_files, client = file_tracker + created_datasets, dataset_client = dataset_tracker + + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + first_resp = dataset_client.create_dataset(name=dataset_name) + created_datasets.append(dataset_name) + assert "successfully created" in first_resp["detail"].lower() + + frontend_file_helper = FrontendFileHelper() + run_upload_workflow( + page, + frontend_file_helper, + num_files, + file_tracker, + client, + jobs_client, + dataset_type="news", + dataset_name=dataset_name, + tmp_path=tmp_path, + ) + + @pytest.mark.parametrize("num_files", [1, 3]) + def test_upload_documents_new_dataset( + self, logged_in_page: Page, file_tracker, tmp_path, jobs_client, file_client, dataset_tracker, num_files + ): + page = logged_in_page + created_files, client = file_tracker + created_datasets, dataset_client = dataset_tracker + + dataset_name = f"autotest_{uuid.uuid4().hex[:8]}" + created_datasets.append(dataset_name) + + frontend_file_helper = FrontendFileHelper() + run_upload_workflow( + page, + frontend_file_helper, + num_files, + file_tracker, + client, + jobs_client, + dataset_type="new", + dataset_name=dataset_name, + tmp_path=tmp_path, + ) + + @pytest.mark.parametrize("num_files", [1, 3]) + def test_upload_documents_with_language( + self, + logged_in_page: Page, + file_tracker, + tmp_path, + jobs_client, + file_client, + num_files, + ): + page = logged_in_page + created_files, client = file_tracker + frontend_file_helper = FrontendFileHelper() + + run_upload_workflow( + page, + frontend_file_helper, + num_files, + file_tracker, + client, + jobs_client, + dataset_type="none", + tmp_path=tmp_path, + language="English", + ) + + @pytest.mark.parametrize("num_files", [1, 3]) + def test_upload_documents_any_preprocessor( + self, + logged_in_page: Page, + file_tracker, + tmp_path, + jobs_client, + file_client, + num_files, + ): + page = logged_in_page + created_files, client = file_tracker + frontend_file_helper = FrontendFileHelper() + + run_upload_workflow( + page, + frontend_file_helper, + num_files, + file_tracker, + client, + jobs_client, + dataset_type="none", + tmp_path=tmp_path, + preprocessor="any", + ) + + @pytest.mark.parametrize("num_files", [1, 3]) + def test_upload_documents_all_settings_new_job_name( + self, + logged_in_page: Page, + file_tracker, + tmp_path, + jobs_client, + file_client, + num_files, + ): + page = logged_in_page + created_files, client = file_tracker + frontend_file_helper = FrontendFileHelper() + + run_upload_workflow( + page, + frontend_file_helper, + num_files, + file_tracker, + client, + jobs_client, + dataset_type="none", + tmp_path=tmp_path, + preprocessor="any", + language="English", + ) + + @pytest.mark.parametrize("num_files", [1, 3]) + def test_upload_documents_all_settings_existing_job_name( + self, + logged_in_page: Page, + file_tracker, + tmp_path, + jobs_client, + file_client, + num_files, + dataset_client, + dataset_tracker, + user_uuid, + job_tracker, + ): + # should we get an error here as well? 
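+ # NOTE (assumption): the API-level test test_create_job_with_existing_name expects a 400 for
+ # a duplicate job name, so the wizard is presumably expected to surface a similar validation
+ # error; once the expected UI behaviour is confirmed, a check such as
+ #     expect(page.locator("div[role='alert']")).to_be_visible(timeout=5000)
+ # could replace this open question.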
+ # create a job + created_files, client = file_tracker + file_info, temp_file = client.upload_temp_file(client, file_tracker, tmp_path) + created_datasets, dataset_client = dataset_tracker + dataset_name = f"autotest_ds_{uuid.uuid4().hex[:8]}" + dataset_client.create_dataset(name=dataset_name) + created_datasets.append(dataset_name) + move_resp = file_client.move_files(name=dataset_name, objects=[file_info["id"]])[0] + assert move_resp["status"] is True + job_name = f"test_job_{uuid.uuid4().hex[:8]}" + create_resp = jobs_client.create_job( + name=job_name, + file_ids=[file_info["id"]], + owners=[user_uuid], + ) + job_tracker[0].append(create_resp) + + # run wizard + page = logged_in_page + created_files, client = file_tracker + frontend_file_helper = FrontendFileHelper() + + run_upload_workflow( + page, + frontend_file_helper, + num_files, + file_tracker, + client, + jobs_client, + dataset_type="none", + tmp_path=tmp_path, + preprocessor="any", + language="English", + job_name=job_name, + ) + + @pytest.mark.parametrize("num_files", [1, 3]) + def test_upload_invalid_format(self, tmp_path, logged_in_page, num_files): + page = logged_in_page + temp_files = [] + for i in range(num_files): + invalid_file = Path(tmp_path / f"{uuid.uuid4().hex}.py") + invalid_file.write_text("this is py file") + temp_files.append(invalid_file) + + logger.info("Open wizard") + page.get_by_role("button", name="Upload Wizard").click() + page.locator("input[type='file']").set_input_files([str(f) for f in temp_files]) + page.get_by_role("button", name="Next").click() + try: + expect(page.locator("text=Error occurred")).to_be_visible(timeout=2000) + logger.info("Error message appeared as expected") + except AssertionError: + pytest.fail("Expected error message did not appear") + + @pytest.mark.skip(reason="Returns 500 even in browser") + @pytest.mark.parametrize("num_files", [1, 3]) + def test_human_in_the_loop(self, logged_in_page: Page, num_files, file_tracker, jobs_client, tmp_path): + page = logged_in_page + created_files, client = file_tracker + frontend_file_helper = FrontendFileHelper() + run_upload_workflow( + page, + frontend_file_helper, + num_files, + file_tracker, + client, + jobs_client, + dataset_type="none", + tmp_path=tmp_path, + human_in_loop=True, + )