diff --git a/.claude/hooks/session-start.sh b/.claude/hooks/session-start.sh new file mode 100755 index 000000000..a0486fcfe --- /dev/null +++ b/.claude/hooks/session-start.sh @@ -0,0 +1,13 @@ +#!/bin/bash +set -euo pipefail + +# Only run in remote Claude Code sessions (web) +if [ "${CLAUDE_CODE_REMOTE:-}" != "true" ]; then + exit 0 +fi + +# Set CI to bypass the Pipelex Gateway terms acceptance check +echo 'export CI=true' >> "$CLAUDE_ENV_FILE" + +# Install all dependencies (creates venv if needed, runs uv sync --all-extras) +make install diff --git a/.claude/settings.json b/.claude/settings.json new file mode 100644 index 000000000..a02622ec2 --- /dev/null +++ b/.claude/settings.json @@ -0,0 +1,43 @@ +{ + "permissions": { + "allow": [ + "Bash(.venv/bin/pytest:*)", + "Bash(pytest:*)", + "Bash(make fix-unused-imports)", + "Bash(make fui)", + "Bash(make agent-check)", + "Bash(make format)", + "Bash(make lint)", + "Bash(make pyright)", + "Bash(make mypy)", + "Bash(make c)", + "Bash(make cc)", + "Bash(make check-unused-imports)", + "Bash(make cleanderived)", + "Bash(make agent-test)", + "Bash(make test-with-prints TEST=:*)", + "Bash(make tp TEST=:*)", + "Bash(make tb)", + "Bash(make install)", + "Bash(make lock)", + "Bash(make li)", + "Bash(make validate)", + "Bash(make v)", + "Bash(make build)", + "Bash(make ukc)", + "Bash(make docs-check)" + ] + }, + "hooks": { + "SessionStart": [ + { + "hooks": [ + { + "type": "command", + "command": "$CLAUDE_PROJECT_DIR/.claude/hooks/session-start.sh" + } + ] + } + ] + } +} \ No newline at end of file diff --git a/.pipelex-dev/test_profiles.toml b/.pipelex-dev/test_profiles.toml index 28cb5842c..6d838fdb3 100644 --- a/.pipelex-dev/test_profiles.toml +++ b/.pipelex-dev/test_profiles.toml @@ -31,23 +31,23 @@ [collections.backends] # --- All Available Backends --- all = [ - "pipelex_gateway", - "anthropic", - "azure_openai", - "bedrock", - "blackboxai", - "fal", - "google", - "groq", - "huggingface", - "mistral", - "ollama", - "openai", - "portkey", - "scaleway", - "vertexai", - "xai", - "internal", + "pipelex_gateway", + "anthropic", + "azure_openai", + "bedrock", + "blackboxai", + "fal", + "google", + "groq", + "huggingface", + "mistral", + "ollama", + "openai", + "portkey", + "scaleway", + "vertexai", + "xai", + "internal", ] [collections.llm] @@ -56,34 +56,34 @@ amazon = ["bedrock-nova-pro", "nova-lite-v1", "nova-micro-v1"] # --- Anthropic Models (Claude) --- anthropic = [ - "claude-3-haiku", - "claude-3.7-sonnet", - "claude-4-opus", - "claude-4-sonnet", - "claude-4.1-opus", - "claude-4.5-haiku", - "claude-4.5-sonnet", - "claude-4.5-opus", - "claude-4.6-opus", + "claude-3-haiku", + "claude-3.7-sonnet", + "claude-4-opus", + "claude-4-sonnet", + "claude-4.1-opus", + "claude-4.5-haiku", + "claude-4.5-sonnet", + "claude-4.5-opus", + "claude-4.6-opus", ] # --- DeepSeek Models --- deepseek = [ - "deepseek-chat", - "deepseek-r1", - "deepseek-v3.1", - "deepseek-r1-distill-llama-70b", - "deepseek-v3.2", - "deepseek-v3.2-speciale", + "deepseek-chat", + "deepseek-r1", + "deepseek-v3.1", + "deepseek-r1-distill-llama-70b", + "deepseek-v3.2", + "deepseek-v3.2-speciale", ] # --- Google Models (Gemini) --- google = [ - "gemini-2.5-flash", - "gemini-2.5-flash-lite", - "gemini-2.5-pro", - "gemini-3.0-pro", - "gemini-3.0-flash-preview", + "gemini-2.5-flash", + "gemini-2.5-flash-lite", + "gemini-2.5-pro", + "gemini-3.0-pro", + "gemini-3.0-flash-preview", ] # --- Groq Models --- @@ -91,15 +91,15 @@ groq = ["groq/compound", "groq/compound-mini"] # --- Meta Models (Llama) --- 
meta = [ - "bedrock-meta-llama-3-3-70b-instruct", - "llama-3.1-8b-instant", - "llama-3.1-8b-instruct", - "llama-3.2-11b-vision-instruct", - "llama-3.3-70b-instruct", - "llama-3.3-70b-instruct-free", - "meta-llama/llama-4-maverick-17b-128e-instruct", - "meta-llama/llama-4-scout-17b-16e-instruct", - "meta-llama/llama-guard-4-12b", + "bedrock-meta-llama-3-3-70b-instruct", + "llama-3.1-8b-instant", + "llama-3.1-8b-instruct", + "llama-3.2-11b-vision-instruct", + "llama-3.3-70b-instruct", + "llama-3.3-70b-instruct-free", + "meta-llama/llama-4-maverick-17b-128e-instruct", + "meta-llama/llama-4-scout-17b-16e-instruct", + "meta-llama/llama-guard-4-12b", ] # --- Microsoft Models --- @@ -107,27 +107,27 @@ microsoft = ["phi-4", "phi-4-multimodal"] # --- Mistral Models --- mistralai = [ - "bedrock-mistral-large", - "ministral-3b", - "ministral-8b", - "mistral-7b-2312", - "mistral-8x7b-2312", - "mistral-codestral-2405", - "pixtral-12b", - "pixtral-large", - "mistral-small-2506", - "mistral-small-3.2", - "mistral-small", - "mistral-medium-2508", - "mistral-medium-3.1", - "mistral-medium", - "mistral-large-2512", - "mistral-large-3", - "mistral-large", - "magistral-small-2509", - "magistral-small", - "magistral-medium-2509", - "magistral-medium", + "bedrock-mistral-large", + "ministral-3b", + "ministral-8b", + "mistral-7b-2312", + "mistral-8x7b-2312", + "mistral-codestral-2405", + "pixtral-12b", + "pixtral-large", + "mistral-small-2506", + "mistral-small-3.2", + "mistral-small", + "mistral-medium-2508", + "mistral-medium-3.1", + "mistral-medium", + "mistral-large-2512", + "mistral-large-3", + "mistral-large", + "magistral-small-2509", + "magistral-small", + "magistral-medium-2509", + "magistral-medium", ] # --- Moonshot AI Models --- @@ -135,28 +135,28 @@ moonshotai = ["kimi-k2-instruct-0905", "kimi-k2-thinking"] # --- OpenAI Models --- openai = [ - "gpt-4o-mini", - "gpt-4o", - "gpt-4.1-nano", - "gpt-4.1-mini", - "gpt-4.1", - "o1-mini", - "o1", - "o3-mini", - "o3", - "o4-mini", - "gpt-5-nano", - "gpt-5-mini", - "gpt-5-chat", - "gpt-5", - "gpt-5-codex", - "gpt-5.1-codex", - "gpt-5.1-codex-max", - "gpt-5.1-chat", - "gpt-5.1", - "gpt-5.2", - "gpt-5.2-chat", - "gpt-5.2-codex", + "gpt-4o-mini", + "gpt-4o", + "gpt-4.1-nano", + "gpt-4.1-mini", + "gpt-4.1", + "o1-mini", + "o1", + "o3-mini", + "o3", + "o4-mini", + "gpt-5-nano", + "gpt-5-mini", + "gpt-5-chat", + "gpt-5", + "gpt-5-codex", + "gpt-5.1-codex", + "gpt-5.1-codex-max", + "gpt-5.1-chat", + "gpt-5.1", + "gpt-5.2", + "gpt-5.2-chat", + "gpt-5.2-codex", ] # --- OpenAI OSS Models --- @@ -164,23 +164,23 @@ openai_oss = ["gpt-oss-20b", "gpt-oss-120b", "gpt-oss-safeguard-20b"] # --- Qwen Models --- qwen = [ - "qwen-2.5-72b-instruct", - "qwen3-32b", - "qwen2.5-vl-72b-instruct", - "qwen3-vl-235b-a22b", - "qwen3-235b-a22b-instruct-2507", - "qwen3-coder-30b-a3b-instruct", + "qwen-2.5-72b-instruct", + "qwen3-32b", + "qwen2.5-vl-72b-instruct", + "qwen3-vl-235b-a22b", + "qwen3-235b-a22b-instruct-2507", + "qwen3-coder-30b-a3b-instruct", ] # --- XAI Models (Grok) --- xai = [ - "grok-3", - "grok-3-mini", - "grok-3-fast", - "grok-3-mini-fast", - "grok-4", - "grok-4-fast-reasoning", - "grok-4-fast-non-reasoning", + "grok-3", + "grok-3-mini", + "grok-3-fast", + "grok-3-mini-fast", + "grok-4", + "grok-4-fast-reasoning", + "grok-4-fast-non-reasoning", ] [collections.img_gen] @@ -189,11 +189,11 @@ stable_diffusion = ["fast-lightning-sdxl"] # --- FAL Models --- fal = [ - "flux-pro", - "flux-pro/v1.1", - "flux-pro/v1.1-ultra", - "flux-2", - "flux-2-pro", + "flux-pro", + 
"flux-pro/v1.1", + "flux-pro/v1.1-ultra", + "flux-2", + "flux-2-pro", ] # --- OpenAI Models --- @@ -208,25 +208,25 @@ qwen = ["qwen-image"] [collections.extract] # --- PDF Extraction Models --- from_pdf = [ - "pypdfium2-extract-pdf", - "docling-extract-text", - "mistral-ocr", - "mistral-ocr-2503", - "mistral-ocr-2505", - "mistral-ocr-2512", - "mistral-document-ai-2505", - "azure-document-intelligence", + "pypdfium2-extract-pdf", + "docling-extract-text", + "mistral-ocr", + "mistral-ocr-2503", + "mistral-ocr-2505", + "mistral-ocr-2512", + "mistral-document-ai-2505", + "azure-document-intelligence", ] # --- Image Extraction Models --- from_image = [ - "docling-extract-text", - "mistral-ocr", - "mistral-ocr-2503", - "mistral-ocr-2505", - "mistral-ocr-2512", - "deepseek-ocr", - "azure-document-intelligence", + "docling-extract-text", + "mistral-ocr", + "mistral-ocr-2503", + "mistral-ocr-2505", + "mistral-ocr-2512", + "deepseek-ocr", + "azure-document-intelligence", ] ################################################################################ @@ -260,10 +260,10 @@ extract_models = ["@from_pdf"] description = "One model per backend for coverage" backends = ["anthropic", "openai", "google", "mistral", "internal"] llm_models = [ - "claude-4.5-haiku", - "gpt-4o-mini", - "gemini-2.5-flash-lite", - "mistral-large", + "claude-4.5-haiku", + "gpt-4o-mini", + "gemini-2.5-flash-lite", + "mistral-large", ] img_gen_models = ["gpt-image-1", "nano-banana"] extract_models = ["pypdfium2-extract-pdf"] diff --git a/.pipelex/inference/backends.toml b/.pipelex/inference/backends.toml index 3201b6a90..8ffc6c6b6 100644 --- a/.pipelex/inference/backends.toml +++ b/.pipelex/inference/backends.toml @@ -16,71 +16,71 @@ enabled = true # Enable after accepting terms via `pipel api_key = "${PIPELEX_GATEWAY_API_KEY}" [anthropic] -enabled = true +enabled = false api_key = "${ANTHROPIC_API_KEY}" [azure_openai] display_name = "Azure OpenAI" -enabled = true +enabled = false endpoint = "${AZURE_API_BASE}" api_key = "${AZURE_API_KEY}" api_version = "${AZURE_API_VERSION}" [bedrock] display_name = "Amazon Bedrock" -enabled = true +enabled = false aws_region = "${AWS_REGION}" [blackboxai] display_name = "BlackBox AI" -enabled = true +enabled = false endpoint = "https://api.blackbox.ai/v1" api_key = "${BLACKBOX_API_KEY}" [fal] display_name = "FAL" -enabled = true +enabled = false api_key = "${FAL_API_KEY}" [google] display_name = "Google AI" -enabled = true +enabled = false api_key = "${GOOGLE_API_KEY}" [groq] display_name = "Groq" -enabled = true +enabled = false endpoint = "https://api.groq.com/openai/v1" api_key = "${GROQ_API_KEY}" [huggingface] display_name = "Hugging Face" -enabled = true +enabled = false api_key = "${HF_TOKEN}" [mistral] display_name = "Mistral AI" -enabled = true +enabled = false api_key = "${MISTRAL_API_KEY}" [ollama] -enabled = true +enabled = false endpoint = "http://localhost:11434/v1" [openai] display_name = "OpenAI" -enabled = true +enabled = false api_key = "${OPENAI_API_KEY}" [portkey] display_name = "Portkey" -enabled = true +enabled = false endpoint = "https://api.portkey.ai/v1" api_key = "${PORTKEY_API_KEY}" [scaleway] display_name = "Scaleway" -enabled = true +enabled = false endpoint = "${SCALEWAY_ENDPOINT}" api_key = "${SCALEWAY_API_KEY}" @@ -93,7 +93,7 @@ gcp_credentials_file_path = "${GCP_CREDENTIALS_FILE_PATH}" [xai] display_name = "xAI" -enabled = true +enabled = false endpoint = "https://api.x.ai/v1" api_key = "${XAI_API_KEY}" diff --git a/.pipelex/pipelex.toml b/.pipelex/pipelex.toml 
index ed8859fcf..12516db51 100644 --- a/.pipelex/pipelex.toml +++ b/.pipelex/pipelex.toml @@ -101,19 +101,19 @@ signed_urls_lifespan_seconds = 3600 # Set to "disabled [pipelex.scan_config] # Directories to exclude when scanning for pipeline files excluded_dirs = [ - ".venv", - "venv", - "env", - ".env", - "virtualenv", - ".virtualenv", - ".git", - "__pycache__", - ".pytest_cache", - ".mypy_cache", - ".ruff_cache", - "node_modules", - "results", + ".venv", + "venv", + "env", + ".env", + "virtualenv", + ".virtualenv", + ".git", + "__pycache__", + ".pytest_cache", + ".mypy_cache", + ".ruff_cache", + "node_modules", + "results", ] #################################################################################################### @@ -189,4 +189,3 @@ is_dump_response_text_enabled = false is_dump_kwargs_enabled = false is_dump_response_enabled = false is_dump_error_enabled = false - diff --git a/.pipelex/plxt.toml b/.pipelex/plxt.toml new file mode 100644 index 000000000..c0a68b01f --- /dev/null +++ b/.pipelex/plxt.toml @@ -0,0 +1,123 @@ +# ============================================================================= +# Pipelex TOML Configuration for pipelex-demo +# ============================================================================= +# Configures TOML/MTHDS formatting and linting behaviour for this project. +# Powered by the Pipelex extension (plxt / taplo engine). +# +# Docs: https://taplo.tamasfe.dev/configuration/ +# ============================================================================= + +# --------------------------------------------------------------------------- +# File discovery +# --------------------------------------------------------------------------- + +# Glob patterns for files to process. +include = ["**/*.toml", "**/*.mthds", "**/*.plx"] + +exclude = [ + ".venv/**", + ".mypy_cache/**", + ".ruff_cache/**", + ".pytest_cache/**", + "__pycache__/**", + "target/**", + "node_modules/**", + ".git/**", + "*.lock", +] # Glob patterns for files to ignore. +# These are evaluated relative to the config file location. + +# ============================================================================= +# Global formatting defaults +# ============================================================================= +# These apply to every file matched by `include` unless overridden by a +# [[rule]].formatting section below. Every option is shown at its built-in +# default so you can tune any of them in one place. 
+ +[formatting] +align_entries = false # line up "=" signs across consecutive entries +align_comments = true # align end-of-line comments on consecutive lines +align_single_comments = true # also align lone comments (requires align_comments) +array_trailing_comma = true +array_auto_expand = true # go multiline when array exceeds column_width +array_auto_collapse = false # don't re-collapse multiline arrays that fit +inline_table_expand = true # expand inline tables exceeding column_width +compact_arrays = true # [1, 2] not [ 1, 2 ] +compact_inline_tables = false # keep spaces inside braces: { a = 1 } +compact_entries = false # keep spaces around "=": key = value +column_width = 80 +indent_tables = false +indent_entries = false +indent_string = " " +trailing_newline = true +reorder_keys = false +reorder_arrays = false +reorder_inline_tables = false +allowed_blank_lines = 2 +crlf = false + +# ============================================================================= +# Per-file-type rules +# ============================================================================= +# Each [[rule]] can narrow its scope with `include` / `exclude` globs and +# provide its own [rule.formatting] overrides. Options not listed here fall +# back to the global [formatting] section above. + + +# --------------------------------------------------------------------------- +# Rule: TOML files +# --------------------------------------------------------------------------- +[[rule]] +# Which files this rule applies to (relative globs). +include = ["**/*.toml"] + +# Per-rule formatting overrides β€” all at defaults so you can tweak them +# independently of .mthds files. +[rule.formatting] +# align_entries = false +# align_comments = true +# align_single_comments = true +# array_trailing_comma = true +# array_auto_expand = true +# array_auto_collapse = true +# inline_table_expand = true +# compact_arrays = true +# compact_inline_tables = false +# compact_entries = false +# column_width = 80 +# indent_tables = false +# indent_entries = false +# indent_string = " " +# trailing_newline = true +# allowed_blank_lines = 2 + + +# --------------------------------------------------------------------------- +# Rule: MTHDS files (Pipelex pipeline definitions) +# --------------------------------------------------------------------------- +[[rule]] +# Which files this rule applies to (relative globs). +include = ["**/*.mthds", "**/*.plx"] + +[rule.schema] +path = "pipelex/language/mthds_schema.json" + +# Per-rule formatting overrides β€” all at defaults so you can tweak them +# independently of .toml files. 
+[rule.formatting] +align_entries = true +# align_comments = true +# align_single_comments = true +# array_trailing_comma = true +# array_auto_expand = true +# array_auto_collapse = true +# inline_table_expand = true +# compact_arrays = true +# compact_inline_tables = false +# compact_entries = false +# column_width = 80 +# indent_tables = false +# indent_entries = false +# indent_string = " " +# trailing_newline = true +# allowed_blank_lines = 2 diff --git a/.pipelex/telemetry.toml b/.pipelex/telemetry.toml index eb2c5374d..ed9a90901 100644 --- a/.pipelex/telemetry.toml +++ b/.pipelex/telemetry.toml @@ -29,11 +29,11 @@ api_key = "${POSTHOG_API_KEY}" # Get from PostHog Project Settings geoip = true # Enable GeoIP lookup debug = false # Enable PostHog debug mode redact_properties = [ - "prompt", - "system_prompt", - "response", - "file_path", - "url", + "prompt", + "system_prompt", + "response", + "file_path", + "url", ] # Event properties to redact # AI span tracing to YOUR PostHog (does NOT affect Langfuse/OTLP - they receive full data) diff --git a/.vscode/launch.json b/.vscode/launch.json index 77c0b76f6..4bae7cc96 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -75,7 +75,7 @@ "program": "${workspaceFolder}/.venv/bin/pipelex", "args": [ "validate", - "temp/bundle.plx", + "temp/bundle.mthds", ], "console": "integratedTerminal", "justMyCode": false @@ -99,7 +99,7 @@ "program": "${workspaceFolder}/.venv/bin/pipelex", "args": [ "run", - "tests/integration/pipelex/pipes/pipelines/test_image_out_in.plx", + "tests/integration/pipelex/pipes/pipelines/test_image_out_in.mthds", ], "console": "integratedTerminal", "justMyCode": false @@ -111,7 +111,7 @@ "program": "${workspaceFolder}/.venv/bin/pipelex", "args": [ "run", - "tests/integration/pipelex/pipes/pipelines/test_image_out_in.plx", + "tests/integration/pipelex/pipes/pipelines/test_image_out_in.mthds", "--pipe", "describe_image", "--inputs", diff --git a/.vscode/settings.json b/.vscode/settings.json index c37976523..6c602ee57 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -22,10 +22,18 @@ "python.testing.pytestEnabled": true, "djlint.showInstallError": false, "files.associations": { - "*.plx": "plx" + "*.plx": "mthds" }, "editor.formatOnSave": true, "[html]": { "editor.formatOnSave": false + }, + "[toml]": { + "editor.defaultFormatter": "Pipelex.pipelex", + "editor.formatOnSave": true + }, + "[mthds]": { + "editor.defaultFormatter": "Pipelex.pipelex", + "editor.formatOnSave": true } } \ No newline at end of file diff --git a/CLAUDE.md b/CLAUDE.md index 2603f071f..2e297a3e4 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -16,6 +16,7 @@ - Pyright: Static type checking - Ruff: Fix unused imports, lint, format - Mypy: Static type checker + - plxt: Format and lint TOML, MTHDS, and PLX files Always fix any issues reported by these tools before proceeding. @@ -39,6 +40,8 @@ ### Running Tests with Prints + > **LOCAL ONLY**: The commands below are meant for a human developer running on their local machine. If you are an AI agent (Claude Code, Cursor, Codex, or any other agent running in the cloud or in a sandboxed environment), **do NOT use these commands**. Use `make agent-test` instead. + If anything went wrong, you can run the tests with prints to see the error: ```bash @@ -48,6 +51,8 @@ ### Running specific Tests + > **LOCAL ONLY**: The commands below are meant for a human developer running on their local machine. 
If you are an AI agent (Claude Code, Cursor, Codex, or any other agent running in the cloud or in a sandboxed environment), **do NOT use these commands**. Use `make agent-test` instead. + ```bash make tp TEST=TestClassName # or @@ -57,6 +62,8 @@ ### Running Last Failed Tests + > **LOCAL ONLY**: The commands below are meant for a human developer running on their local machine. If you are an AI agent (Claude Code, Cursor, Codex, or any other agent running in the cloud or in a sandboxed environment), **do NOT use these commands**. Use `make agent-test` instead. + To rerun only the tests that failed in the previous run, use: ```bash @@ -82,6 +89,22 @@ For standard installations, the virtual environment is named `.venv`. Always check this first. On Windows, the path is `.venv\Scripts\` instead of `.venv/bin/`. +### Pipelex Dev CLI (`pipelex-dev`) + + The `pipelex-dev` CLI provides internal development tools that are not distributed with the package. It is available in the virtual environment. + + ```bash + .venv/bin/pipelex-dev --help + ``` + + Key commands: + + - **`generate-mthds-schema`**: Regenerate the MTHDS JSON Schema (`pipelex/language/mthds_schema.json`). Run this after modifying `mthds_schema_generator.py`. + + ```bash + .venv/bin/pipelex-dev generate-mthds-schema + ``` + ## Coding Standards & Best Practices for Python Code This document outlines the core coding standards, best practices, and quality control procedures for the codebase. @@ -249,7 +272,7 @@ NEVER EVER put more than one TestClass into a test module. - Place test files in the appropriate test category directory: - `tests/unit/` - for unit tests that test individual functions/classes in isolation - `tests/integration/` - for integration tests that test component interactions - - `tests/e2e/` - for end-to-end tests that test complete workflows + - `tests/e2e/` - for end-to-end tests that test complete methods - Do NOT add `__init__.py` files to test directories. Test directories do not need to be Python packages. - Fixtures are defined in conftest.py modules at different levels of the hierarchy, their scope is handled by pytest - Test data is placed inside test_data.py at different levels of the hierarchy, they must be imported with package paths from the root like `from tests.integration.pipelex.cogt.test_data`. Their content is all constants, regrouped inside classes to keep things tidy. 
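For illustration, here is a minimal sketch of a `test_data.py` module following that convention; the class and constant names are hypothetical, not taken from the repository:

```python
# tests/integration/pipelex/cogt/test_data.py
# Hypothetical sketch of the test-data convention described above:
# plain constants grouped inside classes, imported with package paths
# from the repository root.


class LLMTestData:
    SHORT_PROMPT = "Summarize this text in one sentence."
    SYSTEM_PROMPT = "You are a concise technical writer."


class OCRTestData:
    SAMPLE_PDF_URL = "https://example.com/sample.pdf"
    EXPECTED_PAGE_COUNT = 3
```

A test module would then import these with a package path from the root, e.g. `from tests.integration.pipelex.cogt.test_data import LLMTestData`.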
diff --git a/Makefile b/Makefile index 714d78839..a6443e696 100644 --- a/Makefile +++ b/Makefile @@ -20,6 +20,7 @@ VENV_PIPELEX := "$(VIRTUAL_ENV)/bin/pipelex" VENV_MKDOCS := "$(VIRTUAL_ENV)/bin/mkdocs" VENV_MIKE := "$(VIRTUAL_ENV)/bin/mike" VENV_PYLINT := "$(VIRTUAL_ENV)/bin/pylint" +VENV_PLXT := RUST_LOG=warn "$(VIRTUAL_ENV)/bin/plxt" VENV_PIPELEX_DEV := "$(VIRTUAL_ENV)/bin/pipelex-dev" SKELETON_DIR := "$(HOME)/.pipelex-skeleton/" @@ -54,10 +55,14 @@ make update - Upgrade dependencies via uv make validate - Run the setup sequence to validate the config and libraries make build - Build the wheels -make format - format with ruff format -make lint - lint with ruff check +make format - format with ruff and plxt +make lint - lint with ruff and plxt +make ruff-format - format with ruff format +make ruff-lint - lint with ruff check make pyright - Check types with pyright make mypy - Check types with mypy +make plxt-format - Format TOML/MTHDS/PLX files with plxt +make plxt-lint - Lint TOML/MTHDS/PLX files with plxt make rules - Install agent rules for contributing to Pipelex make up-kit-configs - Update kit configs from .pipelex/ @@ -67,6 +72,8 @@ make ccs - Shorthand -> check-config-sync make check-rules - Verify installed agent rules match kit templates make check-urls - Check all URLs in pipelex/urls.py for broken links (quiet) make cu - Check URLs with verbose output (shows details) +make generate-mthds-schema - Generate JSON Schema for .mthds files +make gms - Shorthand -> generate-mthds-schema make update-gateway-models - Update gateway models reference make ugm - Shorthand -> update-gateway-models make check-gateway-models - Check gateway models reference is up-to-date @@ -84,6 +91,8 @@ make merge-check-ruff-lint - Run ruff merge check without updating files make merge-check-ruff-format - Run ruff merge check without updating files make merge-check-mypy - Run mypy merge check without updating files make merge-check-pyright - Run pyright merge check without updating files +make merge-check-plxt-format - Run plxt format check without modifying files +make merge-check-plxt-lint - Run plxt lint check make v - Shorthand -> validate make codex-tests - Run tests for Codex (exit on first failure) (no inference, no codex_disabled) @@ -119,7 +128,7 @@ make docs-list - List deployed documentation versions make docs-deploy VERSION=x.y.z - Deploy docs as version x.y.z (local, no push) make docs-deploy-stable - Deploy stable docs with 'latest' alias (CI only) make docs-deploy-specific-version - Deploy docs for the current version with 'pre-release' alias (CI only) -make docs-deploy-404 - Deploy 404.html for versionless URL redirects +make docs-deploy-root - Deploy root assets (404.html, robots.txt, index.html) to gh-pages make docs-delete VERSION=x.y.z - Delete a deployed documentation version make serve-graph - Start HTTP server to view ReactFlow graphs (PORT=8765, DIR=temp/test_outputs) @@ -148,7 +157,7 @@ export HELP .PHONY: \ all help env env-verbose check-uv check-uv-verbose lock install update build \ - format lint pyright mypy pylint \ + format lint ruff-format ruff-lint pyright mypy pylint plxt-format plxt-lint \ rules up-kit-configs ukc check-config-sync ccs check-rules check-urls cu insert-skeleton \ cleanderived cleanenv cleanall \ test test-xdist t test-quiet tq test-with-prints tp test-inference ti \ @@ -156,13 +165,14 @@ export HELP run-all-tests run-manual-trigger-gha-tests run-gha_disabled-tests \ validate v check c cc agent-check agent-test \ test-durations td test-durations-serial tds 
test-time tt test-time-serial tts \ - merge-check-ruff-lint merge-check-ruff-format merge-check-mypy merge-check-pyright \ + merge-check-ruff-lint merge-check-ruff-format merge-check-mypy merge-check-pyright merge-check-plxt-format merge-check-plxt-lint \ li check-unused-imports fix-unused-imports check-TODOs check-uv \ docs docs-check docs-serve-versioned docs-list docs-deploy docs-deploy-stable docs-deploy-specific-version docs-delete \ + generate-mthds-schema gms \ update-gateway-models ugm check-gateway-models cgm up \ test-count check-test-badge \ serve-graph serve-graph-bg stop-graph-server view-graph sg vg \ - docs-deploy-404 + docs-deploy-root all help: @echo "$$HELP" @@ -224,6 +234,12 @@ lock: env @uv lock && \ echo uv lock without update; +plxt: env ## Rebuild and reinstall plxt CLI from local vscode-pipelex source + $(call PRINT_TITLE,"Reinstalling plxt from source") + @. $(VIRTUAL_ENV)/bin/activate && \ + uv sync --all-extras --reinstall-package plxt && \ + echo "Reinstalled plxt in ${VIRTUAL_ENV}"; + update: env $(call PRINT_TITLE,"Updating all dependencies") @uv lock --upgrade && \ @@ -276,6 +292,15 @@ check-config-sync: env ccs: check-config-sync @echo "> done: ccs = check-config-sync" +generate-mthds-schema: env + $(call PRINT_TITLE,"Generating MTHDS JSON Schema") + $(VENV_PIPELEX_DEV) generate-mthds-schema + +gms: generate-mthds-schema + @echo "> done: gms = generate-mthds-schema" + +# TODO: Add check-mthds-schema target (like check-gateway-models) for CI freshness verification + update-gateway-models: env $(call PRINT_TITLE,"Updating gateway models reference") $(VENV_PIPELEX_DEV) update-gateway-models @@ -664,17 +689,31 @@ cm: cov-missing @echo "> done: cm = cov-missing" ########################################################################################## -### LINTING +### FORMATTING, LINTING, AND TYPECHECKING ########################################################################################## -format: env +ruff-format: env $(call PRINT_TITLE,"Formatting with ruff") $(VENV_RUFF) format . --config pyproject.toml -lint: env +ruff-lint: env $(call PRINT_TITLE,"Linting with ruff") $(VENV_RUFF) check . --fix --config pyproject.toml +plxt-format: env + $(call PRINT_TITLE,"Formatting TOML/MTHDS with plxt") + $(VENV_PLXT) fmt + +plxt-lint: env + $(call PRINT_TITLE,"Linting TOML/MTHDS with plxt") + $(VENV_PLXT) lint + +format: ruff-format plxt-format + @echo "> done: format = ruff-format plxt-format" + +lint: ruff-lint plxt-lint + @echo "> done: lint = ruff-lint plxt-lint" + pyright: env $(call PRINT_TITLE,"Typechecking with pyright") $(VENV_PYRIGHT) --pythonpath $(VENV_PYTHON) --project pyproject.toml @@ -712,6 +751,14 @@ merge-check-pylint: env $(call PRINT_TITLE,"Linting with pylint") $(VENV_PYLINT) --rcfile pyproject.toml . 
+merge-check-plxt-format: env + $(call PRINT_TITLE,"Checking TOML/MTHDS formatting with plxt") + $(VENV_PLXT) fmt --check + +merge-check-plxt-lint: env + $(call PRINT_TITLE,"Linting TOML/MTHDS with plxt") + $(VENV_PLXT) lint + ########################################################################################## ### MISCELLANEOUS ########################################################################################## @@ -737,6 +784,31 @@ check-TODOs: env # Extract version from pyproject.toml for docs deployment DOCS_VERSION := $(shell grep -m1 '^version = ' pyproject.toml | sed -E 's/version = "(.*)"/\1/') +SITE_DOMAIN := $(shell cat docs/CNAME 2>/dev/null | tr -d '[:space:]') + +define ROOT_ROBOTS_TXT +User-agent: * +Allow: /latest/ +Disallow: / +Sitemap: https://$(SITE_DOMAIN)/latest/sitemap.xml +endef +export ROOT_ROBOTS_TXT + +define ROOT_INDEX_HTML +<!DOCTYPE html> +<html> +<head> +<meta charset="utf-8"> +<title>Redirecting to latest documentation...</title> +<meta http-equiv="refresh" content="0; url=https://$(SITE_DOMAIN)/latest/"> +</head> +<body> +Redirecting to latest documentation... +</body> +</html> +endef +export ROOT_INDEX_HTML docs: env $(call PRINT_TITLE,"Serving documentation with mkdocs") @@ -758,24 +830,32 @@ docs-deploy: env $(call PRINT_TITLE,"Deploying documentation version $(if $(VERSION),$(VERSION),$(DOCS_VERSION))") $(VENV_MIKE) deploy $(if $(VERSION),$(VERSION),$(DOCS_VERSION)) -docs-deploy-stable: env docs-deploy-404 +docs-deploy-stable: env $(call PRINT_TITLE,"Deploying stable documentation $(DOCS_VERSION) with latest alias") $(VENV_MIKE) deploy --push --update-aliases $(DOCS_VERSION) latest $(VENV_MIKE) set-default --push latest + $(MAKE) docs-deploy-root -docs-deploy-specific-version: env docs-deploy-404 +docs-deploy-specific-version: env $(call PRINT_TITLE,"Deploying documentation $(DOCS_VERSION) with pre-release alias") $(VENV_MIKE) deploy --push --update-aliases $(DOCS_VERSION) pre-release + $(MAKE) docs-deploy-root -docs-deploy-404: - $(call PRINT_TITLE,"Deploying 404.html to gh-pages root for versionless URL redirects") - @TMPDIR=$$(mktemp -d); \ +docs-deploy-root: +ifeq ($(SITE_DOMAIN),) + $(error SITE_DOMAIN is empty — docs/CNAME is missing or blank. Cannot generate root assets with valid URLs) +endif + $(call PRINT_TITLE,"Deploying root assets (404.html, robots.txt, index.html) to gh-pages") + @git fetch origin gh-pages:gh-pages 2>/dev/null || true; \ + TMPDIR=$$(mktemp -d); \ trap "cd '$(CURDIR)'; git worktree remove '$$TMPDIR' 2>/dev/null || true; rm -rf '$$TMPDIR'" EXIT; \ git worktree add "$$TMPDIR" gh-pages && \ cp docs/404.html "$$TMPDIR/404.html" && \ + echo "$$ROOT_ROBOTS_TXT" > "$$TMPDIR/robots.txt" && \ + echo "$$ROOT_INDEX_HTML" > "$$TMPDIR/index.html" && \ cd "$$TMPDIR" && \ - git add 404.html && \ - (git diff --cached --quiet || git commit -m "Update 404.html for versionless URL redirects") && \ + git add 404.html robots.txt index.html && \ + (git diff --cached --quiet || git commit -m "Update root assets (404.html, robots.txt, index.html)") && \ git push origin gh-pages docs-delete: env diff --git a/README.md b/README.md index 41bd2b779..1f42afae1 100644 --- a/README.md +++ b/README.md @@ -11,8 +11,8 @@

-AI Workflows That Agents Build & Run
-Pipelex is developing the open standard for repeatable AI workflows.
+AI Methods That Agents Build & Run
+Pipelex is developing the open standard for repeatable AI methods.
 Write business logic, not API calls.
@@ -76,17 +76,17 @@ Use your existing API keys from OpenAI, Anthropic, Google, Mistral, etc. See [Co Run models locally with Ollama, vLLM, LM Studio, or llama.cpp - no API keys required. See [Configure AI Providers](https://docs.pipelex.com/pre-release/home/5-setup/configure-ai-providers/) for details. -## 3. Generate Your First Workflow +## 3. Generate Your First Method -Create a complete AI workflow with a single command: +Create a complete AI method with a single command: ```bash -pipelex build pipe "Take a CV and Job offer in PDF, analyze if they match and generate 5 questions for the interview" --output results/cv_match.plx +pipelex build pipe "Take a CV and Job offer in PDF, analyze if they match and generate 5 questions for the interview" --output results/cv_match.mthds ``` -This command generates a production-ready `.plx` file with domain definitions, concepts, and multiple processing steps that analyzes CV-job fit and prepares interview questions. +This command generates a production-ready `.mthds` file with domain definitions, concepts, and multiple processing steps that analyzes CV-job fit and prepares interview questions. -**cv_match.plx** +**cv_match.mthds** ```toml domain = "cv_match" description = "Matching CVs with job offers and generating interview questions" @@ -109,7 +109,7 @@ refines = "Text" [pipe.analyze_cv_job_match_and_generate_questions] type = "PipeSequence" description = """ -Main pipeline that orchestrates the complete CV-job matching and interview question generation workflow. Takes a candidate's CV and a job offer as PDF documents, extracts their content, performs a comprehensive match analysis identifying strengths, gaps, and areas to probe, and generates exactly 5 targeted interview questions based on the analysis results. +Main pipeline that orchestrates the complete CV-job matching and interview question generation method. Takes a candidate's CV and a job offer as PDF documents, extracts their content, performs a comprehensive match analysis identifying strengths, gaps, and areas to probe, and generates exactly 5 targeted interview questions based on the analysis results. """ inputs = { cv_pdf = "PDF", job_offer_pdf = "PDF" } output = "Question[5]" @@ -131,7 +131,7 @@ Executes parallel extraction of text content from both the CV PDF and job offer """ inputs = { cv_pdf = "PDF", job_offer_pdf = "PDF" } output = "Dynamic" -parallels = [ +branches = [ { pipe = "extract_cv_text", result = "cv_pages" }, { pipe = "extract_job_offer_text", result = "job_offer_pages" }, ] @@ -255,7 +255,7 @@ flowchart TD ```bash # Run with input file -pipelex run results/cv_match.plx --inputs inputs.json +pipelex run results/cv_match.mthds --inputs inputs.json ``` Create an `inputs.json` file with your PDF URLs: @@ -282,17 +282,19 @@ Create an `inputs.json` file with your PDF URLs: ```python import asyncio import json -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner from pipelex.pipelex import Pipelex async def run_pipeline(): with open("inputs.json", encoding="utf-8") as f: inputs = json.load(f) - pipe_output = await execute_pipeline( + runner = PipelexRunner() + response = await runner.execute_pipeline( pipe_code="cv_match", inputs=inputs ) + pipe_output = response.pipe_output print(pipe_output.main_stuff_as_str) Pipelex.make() @@ -305,13 +307,13 @@ asyncio.run(run_pipeline())
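For reference, here is one plausible way to produce the `inputs.json` consumed by the snippets above, assuming the two `PDF` inputs declared in the bundle (`cv_pdf`, `job_offer_pdf`) are provided as URLs; the URLs below are placeholders:

```python
# Hypothetical sketch: write an inputs.json whose keys match the pipe's
# declared inputs (cv_pdf, job_offer_pdf). The URLs are placeholders.
import json

inputs = {
    "cv_pdf": "https://example.com/candidate_cv.pdf",
    "job_offer_pdf": "https://example.com/job_offer.pdf",
}

with open("inputs.json", "w", encoding="utf-8") as f:
    json.dump(inputs, f, indent=2)
```

Both `pipelex run results/cv_match.mthds --inputs inputs.json` and the Python snippet above read this same file.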
-From Whiteboard to AI Workflow in less than 5 minutes with no hands (2025-07)
+From Whiteboard to AI Method in less than 5 minutes with no hands (2025-07)
 Pipelex Demo
-The AI workflow that writes an AI workflow in 64 seconds (2025-09)
+The AI method that writes an AI method in 64 seconds (2025-09)
Pipelex Live Demo @@ -323,21 +325,21 @@ asyncio.run(run_pipeline()) ## πŸ’‘ What is Pipelex? -Pipelex is an open-source language that enables you to build and run **repeatable AI workflows**. Instead of cramming everything into one complex prompt, you break tasks into focused steps, each pipe handling one clear transformation. +Pipelex is an open-source language that enables you to build and run **repeatable AI methods**. Instead of cramming everything into one complex prompt, you break tasks into focused steps, each pipe handling one clear transformation. -Each pipe processes information using **Concepts** (typing with meaning) to ensure your pipelines make sense. The Pipelex language (`.plx` files) is simple and human-readable, even for non-technical users. Each step can be structured and validated, giving you the reliability of software with the intelligence of AI. +Each pipe processes information using **Concepts** (typing with meaning) to ensure your pipelines make sense. The Pipelex language (`.mthds` files) is simple and human-readable, even for non-technical users. Each step can be structured and validated, giving you the reliability of software with the intelligence of AI. ## πŸ“– Next Steps **Learn More:** -- [Design and Run Pipelines](https://docs.pipelex.com/pre-release/home/6-build-reliable-ai-workflows/pipes/) - Complete guide with examples -- [Kick off a Pipeline Project](https://docs.pipelex.com/pre-release/home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project/) - Deep dive into Pipelex +- [Design and Run Methods](https://docs.pipelex.com/pre-release/home/6-build-reliable-ai-workflows/pipes/) - Complete guide with examples +- [Kick off a Method Project](https://docs.pipelex.com/pre-release/home/6-build-reliable-ai-workflows/kick-off-a-methods-project/) - Deep dive into Pipelex - [Configure AI Providers](https://docs.pipelex.com/pre-release/home/5-setup/configure-ai-providers/) - Set up AI providers and models ## πŸ”§ IDE Extension -We **highly** recommend installing our extension for `.plx` files into your IDE. You can find it in the [Open VSX Registry](https://open-vsx.org/extension/Pipelex/pipelex). It's coming soon to VS Code marketplace too. If you're using Cursor, Windsurf or another VS Code fork, you can search for it directly in your extensions tab. +We **highly** recommend installing our extension for `.mthds` files into your IDE. You can find it in the [Open VSX Registry](https://open-vsx.org/extension/Pipelex/pipelex). It's coming soon to VS Code marketplace too. If you're using Cursor, Windsurf or another VS Code fork, you can search for it directly in your extensions tab. ## πŸ“š Examples & Cookbook diff --git a/docs/home/1-releases/chicago.md b/docs/home/1-releases/chicago.md index 82e11df81..f9a8128d6 100644 --- a/docs/home/1-releases/chicago.md +++ b/docs/home/1-releases/chicago.md @@ -4,7 +4,7 @@ title: "Chicago Release" # Pipelex v0.18.0 "Chicago" -**The AI workflow framework that just works.** +**The AI method framework that just works.** ## Why Pipelex @@ -12,19 +12,19 @@ Pipelex eliminates the complexity of building AI-powered applications. Instead o - **One framework** for prompts, pipelines, and structured outputs - **One API key** for dozens of AI models -- **One workflow** from prototype to production +- **One method** from prototype to production --- ## A Major Milestone -Three months after our first public launch in San Francisco, Pipelex reaches a new level of maturity with the "Chicago" release (currently in beta-test). 
This version delivers on our core promise: **enabling every developer to build AI workflows that are reliable, flexible, and production-ready**. +Three months after our first public launch in San Francisco, Pipelex reaches a new level of maturity with the "Chicago" release (currently in beta-test). This version delivers on our core promise: **enabling every developer to build AI methods that are reliable, flexible, and production-ready**. Version 0.18.0 represents our most significant release to date, addressing the three priorities that emerged from real-world usage: - **Universal model access** β€” one API key for all leading AI models - **State-of-the-art document extraction** β€” deployable anywhere -- **Visual pipeline inspection** β€” full transparency into your workflows +- **Visual pipeline inspection** β€” full transparency into your methods --- @@ -91,7 +91,7 @@ Broad support for open-source AI: ### Developer Experience -- **Pure PLX Workflows** β€” Inline concept structures now support nested concepts, making Pipelex fully usable with just `.plx` files and the CLIβ€”no Python code required +- **Pure MTHDS Methods** β€” Inline concept structures now support nested concepts, making Pipelex fully usable with just `.mthds` files and the CLIβ€”no Python code required - **Deep Integration Options** β€” Generate Pydantic BaseModels from your declarative concepts for full IDE autocomplete, type checking, and validation (TypeScript Zod structures coming soon) - **PipeCompose Construct Mode** β€” Build `StructuredContent` objects deterministically without an LLM, composing outputs from working memory variables, fixed values, templates, and nested structures - **Cloud Storage for Artifacts** β€” Store generated images and extracted pages on AWS S3 or Google Cloud Storage with public or signed URLs @@ -112,7 +112,7 @@ Then run `pipelex init` to configure your environment and obtain your Gateway AP --- -*Ready to build AI workflows that just work?* +*Ready to build AI methods that just work?* [Join the Waitlist](https://go.pipelex.com/waitlist){ .md-button .md-button--primary } [Documentation](https://docs.pipelex.com/pre-release){ .md-button } diff --git a/docs/home/10-advanced-customizations/observer-provider-injection.md b/docs/home/10-advanced-customizations/observer-provider-injection.md index f277ef067..eaeb4b21b 100644 --- a/docs/home/10-advanced-customizations/observer-provider-injection.md +++ b/docs/home/10-advanced-customizations/observer-provider-injection.md @@ -216,4 +216,4 @@ def setup_pipelex(): return pipelex_instance ``` -The observer system provides powerful insights into your pipeline execution patterns and is essential for monitoring, debugging, and optimizing your Pipelex workflows. \ No newline at end of file +The observer system provides powerful insights into your pipeline execution patterns and is essential for monitoring, debugging, and optimizing your Pipelex methods. 
\ No newline at end of file diff --git a/docs/home/2-get-started/pipe-builder.md b/docs/home/2-get-started/pipe-builder.md index 48e81d3b8..8fa377486 100644 --- a/docs/home/2-get-started/pipe-builder.md +++ b/docs/home/2-get-started/pipe-builder.md @@ -1,5 +1,5 @@ --- -title: "Generate Workflows with Pipe Builder" +title: "Generate Methods with Pipe Builder" --- ![Pipelex Banner](https://d2cinlfp2qnig1.cloudfront.net/banners/pipelex_banner_docs_v2.png) @@ -18,9 +18,9 @@ During the second step of the initialization, we recommand, for a quick start, t If you want to bring your own API keys, see [Configure AI Providers](../../home/5-setup/configure-ai-providers.md) for details. -# Generate workflows with Pipe Builder +# Generate methods with Pipe Builder -The fastest way to create production-ready AI workflows is with the Pipe Builder. Just describe what you want, and Pipelex generates complete, validated pipelines. +The fastest way to create production-ready AI methods is with the Pipe Builder. Just describe what you want, and Pipelex generates complete, validated pipelines. ```bash pipelex build pipe "Take a CV and Job offer in PDF, analyze if they match and generate 5 questions for the interview" @@ -28,12 +28,12 @@ pipelex build pipe "Take a CV and Job offer in PDF, analyze if they match and ge The pipe builder generates three files in a numbered directory (e.g., `results/pipeline_01/`): -1. **`bundle.plx`** - Complete production-ready script in our Pipelex language with domain definition, concepts, and pipe steps +1. **`bundle.mthds`** - Complete production-ready script in our Pipelex language with domain definition, concepts, and pipe steps 2. **`inputs.json`** - Template describing the **mandatory** inputs for running the pipe 3. **`run_{pipe_code}.py`** - Ready-to-run Python script that you can customize and execute !!! tip "Pipe Builder Requirements" - For now, the pipe builder requires access to **Claude 4.5 Sonnet**, either through Pipelex Inference, or using your own key through Anthropic, Amazon Bedrock or BlackboxAI. Don't hesitate to join our [Discord](https://go.pipelex.com/discord) to get a key, otherwise, you can also create the workflows yourself, following our [documentation guide](./write-workflows-manually.md). + For now, the pipe builder requires access to **Claude 4.5 Sonnet**, either through Pipelex Inference, or using your own key through Anthropic, Amazon Bedrock or BlackboxAI. Don't hesitate to join our [Discord](https://go.pipelex.com/discord) to get a key, otherwise, you can also create the methods yourself, following our [documentation guide](./write-methods-manually.md). !!! info "Learn More" Want to understand how the Pipe Builder works under the hood? See [Pipe Builder Deep Dive](../9-tools/pipe-builder.md) for the full explanation of its multi-step generation process. @@ -43,18 +43,18 @@ The pipe builder generates three files in a numbered directory (e.g., `results/p **Option 1: CLI** ```bash -pipelex run results/cv_match.plx --inputs inputs.json +pipelex run results/cv_match.mthds --inputs inputs.json ``` The `--inputs` file should be a JSON dictionary where keys are input variable names and values are the input data. Learn more on how to provide the inputs of a pipe: [Providing Inputs to Pipelines](../../home/6-build-reliable-ai-workflows/pipes/provide-inputs.md) **Option 2: Python** -This requires having the `.plx` file or your pipe inside the directory where the Python file is located. 
+This requires having the `.mthds` file or your pipe inside the directory where the Python file is located. ```python import json -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner from pipelex.pipelex import Pipelex # Initialize Pipelex @@ -65,10 +65,12 @@ with open("inputs.json", "r", encoding="utf-8") as json_file: inputs = json.load(json_file) # Execute the pipeline -pipe_output = await execute_pipeline( +runner = PipelexRunner() +response = await runner.execute_pipeline( pipe_code="analyze_cv_and_prepare_interview", inputs=inputs ) +pipe_output = response.pipe_output print(pipe_output.main_stuff) @@ -76,7 +78,7 @@ print(pipe_output.main_stuff) ## IDE Support -We **highly** recommend installing our own extension for PLX files into your IDE of choice. You can find it in the [Open VSX Registry](https://open-vsx.org/extension/Pipelex/pipelex) and download it directly using [this link](https://open-vsx.org/api/Pipelex/pipelex/0.2.1/file/Pipelex.pipelex-0.2.1.vsix). It's coming soon to the VS Code marketplace too and if you are using Cursor, Windsurf or another VS Code fork, you can search for it directly in your extensions tab. +We **highly** recommend installing our own extension for MTHDS files into your IDE of choice. You can find it in the [Open VSX Registry](https://open-vsx.org/extension/Pipelex/pipelex) and download it directly using [this link](https://open-vsx.org/api/Pipelex/pipelex/0.2.1/file/Pipelex.pipelex-0.2.1.vsix). It's coming soon to the VS Code marketplace too and if you are using Cursor, Windsurf or another VS Code fork, you can search for it directly in your extensions tab. ## Examples @@ -86,12 +88,12 @@ We **highly** recommend installing our own extension for PLX files into your IDE ## Next Steps -Now that you know how to generate workflows with the Pipe Builder, explore these resources: +Now that you know how to generate methods with the Pipe Builder, explore these resources: -**Learn how to Write Workflows yourself** +**Learn how to Write Methods yourself** -- [:material-pencil: Write Workflows Manually](./write-workflows-manually.md){ .md-button .md-button--primary } -- [:material-book-open-variant: Build Reliable AI Workflows](../6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md){ .md-button .md-button--primary } +- [:material-pencil: Write Methods Manually](./write-methods-manually.md){ .md-button .md-button--primary } +- [:material-book-open-variant: Build Reliable AI Methods](../6-build-reliable-ai-workflows/kick-off-a-methods-project.md){ .md-button .md-button--primary } **Explore Examples:** diff --git a/docs/home/2-get-started/write-workflows-manually.md b/docs/home/2-get-started/write-methods-manually.md similarity index 90% rename from docs/home/2-get-started/write-workflows-manually.md rename to docs/home/2-get-started/write-methods-manually.md index 478983b92..525cbe0e8 100644 --- a/docs/home/2-get-started/write-workflows-manually.md +++ b/docs/home/2-get-started/write-methods-manually.md @@ -1,16 +1,16 @@ -# Writing Workflows +# Writing Methods -Ready to dive deeper? This section shows you how to manually create pipelines and understand the `.plx` language. +Ready to dive deeper? This section shows you how to manually create pipelines and understand the `.mthds` language. -!!! tip "Prefer Automated Workflow Generation?" 
- If you have access to **Claude 4.5 Sonnet** (via Pipelex Inference, Anthropic, Amazon Bedrock, or BlackBox AI), you can use our **pipe builder** to generate workflows from natural language descriptions. See the [Pipe Builder guide](./pipe-builder.md) to learn how to use `pipelex build pipe` commands. This tutorial is for those who want to write workflows manually or understand the `.plx` language in depth. +!!! tip "Prefer Automated Method Generation?" + If you have access to **Claude 4.5 Sonnet** (via Pipelex Inference, Anthropic, Amazon Bedrock, or BlackBox AI), you can use our **pipe builder** to generate methods from natural language descriptions. See the [Pipe Builder guide](./pipe-builder.md) to learn how to use `pipelex build pipe` commands. This tutorial is for those who want to write methods manually or understand the `.mthds` language in depth. ## Write Your First Pipeline Let's build a **character generator** to understand the basics. -Create a `.plx` file anywhere in your project (we recommend a `pipelines` directory): +Create a `.mthds` file anywhere in your project (we recommend a `pipelines` directory): -`character.plx` +`character.mthds` ```toml domain = "characters" # domain of existance of your pipe @@ -41,15 +41,17 @@ Create a Python file to execute the pipeline: `character.py` ```python -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner from pipelex.pipelex import Pipelex # Initialize pipelex to load your pipeline libraries Pipelex.make() -pipe_output = await execute_pipeline( +runner = PipelexRunner() +response = await runner.execute_pipeline( pipe_code="create_character", ) +pipe_output = response.pipe_output print(pipe_output.main_stuff_as_str) # `main_stuff_as_str` is allowed here because the output is a `TextContent` ``` @@ -70,9 +72,9 @@ As you might notice, this is plain text, and nothing is structured. Now we are g Let's create a rigorously structured `Character` object instead of plain text. We need to create the concept `Character`. The concept names MUST be in PascalCase. [Learn more about defining concepts](../6-build-reliable-ai-workflows/concepts/define_your_concepts.md) -### Option 1: Define the Structure in your `.plx` file +### Option 1: Define the Structure in your `.mthds` file -Define structures directly in your `.plx` file: +Define structures directly in your `.mthds` file: ```toml [concept.Character] # Declare the concept by giving it a name. 
@@ -89,7 +91,7 @@ description = "A description of the character" # Fourth attribute: "descrip Specify that the output of your Pipellm is a `Character` object: -`characters.plx` +`characters.mthds` ```toml domain = "characters" @@ -146,7 +148,7 @@ Learn more in [Inline Structures](../6-build-reliable-ai-workflows/concepts/inli Specify that the output of your Pipellm is a `Character` object: -`characters.plx` +`characters.mthds` ```toml domain = "characters" @@ -229,7 +231,7 @@ Learn more about Jinja in the [PipeLLM documentation](../../home/6-build-reliabl `run_pipe.py` ```python -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner from pipelex.pipelex import Pipelex from character_model import CharacterMetadata @@ -250,10 +252,12 @@ inputs = { } # Run the pipe with loaded inputs -pipe_output = await execute_pipeline( +runner = PipelexRunner() +response = await runner.execute_pipeline( pipe_code="extract_character_advances", inputs=inputs, ) +pipe_output = response.pipe_output # Get the result as a properly typed instance print(pipe_output) @@ -284,7 +288,7 @@ class CharacterMetadata(StructuredContent): `run_pipe.py` ```python -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner from pipelex.pipelex import Pipelex from character_model import CharacterMetadata @@ -305,10 +309,12 @@ inputs = { } # Run the pipe with loaded inputs -pipe_output = await execute_pipeline( +runner = PipelexRunner() +response = await runner.execute_pipeline( pipe_code="extract_character_advances", inputs=inputs, ) +pipe_output = response.pipe_output # Get the result as a properly typed instance extracted_metadata = pipe_output.main_stuff_as(content_type=CharacterMetadata) @@ -325,12 +331,12 @@ Now that you understand the basics, explore more: **Learn More about the PipeLLM:** -- [LLM Configuration: play with the models](../../home/6-build-reliable-ai-workflows/configure-ai-llm-to-optimize-workflows.md) - Optimize cost and quality +- [LLM Configuration: play with the models](../../home/6-build-reliable-ai-workflows/configure-ai-llm-to-optimize-methods.md) - Optimize cost and quality - [Full configuration of the PipeLLM](../../home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeLLM.md) **Learn more about Pipelex (domains, project structure, best practices...)** -- [Build Reliable AI Workflows](../../home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md) - Deep dive into pipeline design +- [Build Reliable AI Methods](../../home/6-build-reliable-ai-workflows/kick-off-a-methods-project.md) - Deep dive into pipeline design - [Cookbook Examples](../../home/4-cookbook-examples/index.md) - Real-world examples and patterns **Learn More about the other pipes** diff --git a/docs/home/3-understand-pipelex/language-spec-v0-1-0.md b/docs/home/3-understand-pipelex/language-spec-v0-1-0.md index 7f6b319aa..f26e523ba 100644 --- a/docs/home/3-understand-pipelex/language-spec-v0-1-0.md +++ b/docs/home/3-understand-pipelex/language-spec-v0-1-0.md @@ -1,28 +1,28 @@ -# Pipelex (PLX) – Declarative AI Workflow Spec (v0.1.0) +# Pipelex (MTHDS) – Declarative AI Method Spec (v0.1.0) -**Build deterministic, repeatable AI workflows using declarative TOML syntax.** +**Build deterministic, repeatable AI methods using declarative TOML syntax.** -The Pipelex Language (PLX) uses a TOML-based syntax to define deterministic, repeatable AI workflows. 
This specification documents version 0.1.0 of the language and establishes the canonical way to declare domains, concepts, and pipes inside `.plx` bundles. +The Pipelex Language (MTHDS) uses a TOML-based syntax to define deterministic, repeatable AI methods. This specification documents version 0.1.0 of the language and establishes the canonical way to declare domains, concepts, and pipes inside `.mthds` bundles. --- ## Core Idea -Pipelex is a workflow declaration language that gets interpreted at runtime, we already have a Python runtime (see [github.com/pipelex/pipelex](https://github.com/pipelex/pipelex)). +Pipelex is a method declaration language that gets interpreted at runtime, we already have a Python runtime (see [github.com/pipelex/pipelex](https://github.com/pipelex/pipelex)). -Pipelex lets you declare **what** your AI workflow should accomplish and **how** to execute it step by step. Each `.plx` file represents a bundle where you define: +Pipelex lets you declare **what** your AI method should accomplish and **how** to execute it step by step. Each `.mthds` file represents a bundle where you define: - **Concepts** (PascalCase): the structured or unstructured data flowing through your system -- **Pipes** (snake_case): operations or orchestrators that define your workflow +- **Pipes** (snake_case): operations or orchestrators that define your method - **Domain** (named in snake_case): the topic or field of work this bundle is about -Write once in `.plx` files. Run anywhere. Get the same results every time. +Write once in `.mthds` files. Run anywhere. Get the same results every time. --- ## Semantics -Pipelex workflows are **declarative and deterministic**: +Pipelex methods are **declarative and deterministic**: - Pipes are evaluated based on their dependencies, not declaration order - Controllers explicitly define execution flow (sequential, parallel, or conditional) @@ -35,7 +35,7 @@ All concepts are strongly typed. All pipes declare their inputs and outputs. The **Guarantees:** -- Deterministic workflow execution and outputs +- Deterministic method execution and outputs - Strong typing with validation before runtime **Not supported in v0.1.0:** @@ -48,9 +48,9 @@ All concepts are strongly typed. All pipes declare their inputs and outputs. The --- -## Complete Example: CV Job Matching Workflow +## Complete Example: CV Job Matching Method -This workflow analyses candidate CVs against job offer requirements to determine match quality. +This method analyses candidate CVs against job offer requirements to determine match quality. ```toml domain = "cv_job_matching" @@ -180,5 +180,5 @@ Evaluate how well this candidate matches the job requirements. 
- Processes all candidate CVs in parallel (batch processing) - Each CV is extracted and analyzed against the structured job requirements using an LLM - Produces a scored match analysis for each candidate with strengths, weaknesses, and hiring recommendations -- Demonstrates sequential orchestration, parallel processing, nested workflows, and strong typing +- Demonstrates sequential orchestration, parallel processing, nested methods, and strong typing diff --git a/docs/home/3-understand-pipelex/pipelex-paradigm/index.md b/docs/home/3-understand-pipelex/pipelex-paradigm/index.md index 80ca7b913..0754ec490 100644 --- a/docs/home/3-understand-pipelex/pipelex-paradigm/index.md +++ b/docs/home/3-understand-pipelex/pipelex-paradigm/index.md @@ -1,12 +1,12 @@ # The Pipelex Paradigm -Pipelex is an **open-source Python framework** for defining and running **repeatable AI workflows**. +Pipelex is an **open-source Python framework** for defining and running **repeatable AI methods**. Here's what we've learned: LLMs are powerful, but asking them to do everything in one prompt is like asking a brilliant colleague to solve ten problems while juggling. The more complexity you pack into a single prompt, the more reliability drops. You've seen it: the perfect prompt that works 90% of the time until it doesn't. The solution is straightforward: break complex tasks into focused steps. But without proper tooling, you end up with spaghetti code and prompts scattered across your codebase. -Pipelex introduces **knowledge pipelines**: a way to capture these workflow steps as **composable pipes**. Each pipe follows one rule: **knowledge in, knowledge out**. Unlike rigid templates, each pipe uses AI's full intelligence to handle variation while guaranteeing consistent output structure. You get **deterministic structure with adaptive intelligence**, the reliability of software with the flexibility of AI. +Pipelex introduces **knowledge pipelines**: a way to capture these method steps as **composable pipes**. Each pipe follows one rule: **knowledge in, knowledge out**. Unlike rigid templates, each pipe uses AI's full intelligence to handle variation while guaranteeing consistent output structure. You get **deterministic structure with adaptive intelligence**, the reliability of software with the flexibility of AI. ## Working with Knowledge and Using Concepts to Make Sense diff --git a/docs/home/3-understand-pipelex/viewpoint.md b/docs/home/3-understand-pipelex/viewpoint.md index 1690ef0fe..78aed111d 100644 --- a/docs/home/3-understand-pipelex/viewpoint.md +++ b/docs/home/3-understand-pipelex/viewpoint.md @@ -5,13 +5,13 @@ Web version: https://knowhowgraph.com/ --- # Viewpoint: The Know-How Graph -Declarative, Repeatable AI Workflows as Shared Infrastructure +Declarative, Repeatable AI Methods as Shared Infrastructure **TL;DR** Agents are great at solving new problems, terrible at doing the same thing twice. -We argue that repeatable AI workflows should complement agents: written in a declarative language that both humans and agents can understand, reuse, and compose. These workflows become tools that agents can build, invoke, and share to turn repeatable cognitive work into reliable infrastructure. +We argue that repeatable AI methods should complement agents: written in a declarative language that both humans and agents can understand, reuse, and compose. These methods become tools that agents can build, invoke, and share to turn repeatable cognitive work into reliable infrastructure. 
At scale, this forms a **Know-How Graph:** a network of reusable methods that become shared infrastructure.

@@ -25,13 +25,13 @@ This is **the repeatability paradox**. Agents excel at understanding requirement

### We Need a Standard for Reusable Methods

-The solution is to capture these methods as AI workflows so agents can reuse them.
+The solution is to capture this know-how as AI methods so agents can reuse them.

-By "AI workflows" we mean the actual intellectual work that wasn't automatable before LLMs: extracting structured data from unstructured documents, applying complex analyses and business rules, generating reports with reasoning. **This isn't about API plumbing or app connectors, it's about the actual intellectual work.**
+By "AI methods" we mean the actual intellectual work that wasn't automatable before LLMs: extracting structured data from unstructured documents, applying complex analyses and business rules, generating reports with reasoning. **This isn't about API plumbing or app connectors, it's about the actual intellectual work.**

-Yet look at what's happening today: teams everywhere are hand-crafting the same workflows from scratch. To extract data points from contracts and RFPs, to process expense reports, to classify documents, to screen resumes: identical problems solved in isolation, burning engineering hours.
+Yet look at what's happening today: teams everywhere are hand-crafting the same methods from scratch. To extract data points from contracts and RFPs, to process expense reports, to classify documents, to screen resumes: identical problems solved in isolation, burning engineering hours.

-## AI workflows must be formalized
+## AI methods must be formalized

OpenAPI and MCP enable interoperability for software and agents. The remaining problem is formalizing the **methods that assemble the cognitive steps themselves:** extraction, analysis, synthesis, creativity, and decision-making, the part where understanding matters. These formalized methods must be:

@@ -39,29 +39,29 @@ OpenAPI and MCP enable interoperability for software and agents. The remaining p

- **Efficient:** use the right AI model for each step, large or small.
- **Transparent:** no black boxes. Domain experts can audit the logic, spot issues, suggest improvements.

-The workflow becomes a shared artifact that humans and AI collaborate on, optimize together, and trust to run at scale.
+The method becomes a shared artifact that humans and AI collaborate on, optimize together, and trust to run at scale.

### Current solutions are inadequate

-Engineers building AI workflows today are stuck with bad options.
+Engineers building AI methods today are stuck with bad options.

-Code frameworks like LangChain require **maintaining custom software for every workflow,** with business logic buried in implementation details and technical debt accumulating with each new use case.
+Code frameworks like LangChain require **maintaining custom software for every method,** with business logic buried in implementation details and technical debt accumulating with each new use case.

-Visual builders like Zapier, Make, or n8n excel at what they're designed for: connecting APIs and automating data flow between services. **But automation platforms are not cognitive workflow systems.** AI was bolted on as a feature after the fact. They weren't built for intellectual work. When you need actual understanding and multi-step reasoning, these tools quickly become unwieldy. 
+Visual builders like Zapier, Make, or n8n excel at what they're designed for: connecting APIs and automating data flow between services. **But automation platforms are not cognitive method systems.** AI was bolted on as a feature after the fact. They weren't built for intellectual work. When you need actual understanding and multi-step reasoning, these tools quickly become unwieldy.

-None of these solutions speak the language of the domain expert. None of them were built for agents to understand, modify, or generate workflows from requirements. They express technical plumbing, not business logic.
+None of these solutions speak the language of the domain expert. None of them were built for agents to understand, modify, or generate methods from requirements. They express technical plumbing, not business logic.

At the other extreme, agent SDKs and multi-agent frameworks give you flexibility but sacrifice the repeatability you need for production. **You want agents for exploration and problem-solving, but when you've found a solution that works, you need to lock it down.**

-> We need a universal workflow language that expresses business logic, not technical plumbing.
-This workflow language must run across platforms, models, and agent frameworks, where the method outlives any vendor or model version.
+> We need a universal method language that expresses business logic, not technical plumbing.
+This method language must run across platforms, models, and agent frameworks, where the method outlives any vendor or model version.
> 

## We Need a Declarative Language

-AI workflows should be first-class citizens of our technical infrastructure: not buried in code or trapped in platforms, but expressed in a language built for the job. The method should be an artifact you can version, diff, test, and optimize.
+AI methods should be first-class citizens of our technical infrastructure: not buried in code or trapped in platforms, but expressed in a language built for the job. The method should be an artifact you can version, diff, test, and optimize.

-**We need a declarative language that states what you want, not how to compute it.** As SQL separated intent from implementation for data, we need the same for AI workflows, so we can build a Know-How Graph: a reusable graph of methods that agents and humans both understand.
+**We need a declarative language that states what you want, not how to compute it.** As SQL separated intent from implementation for data, we need the same for AI methods, so we can build a Know-How Graph: a reusable graph of methods that agents and humans both understand.

### The language shouldn't need documentation: it is the documentation

@@ -71,22 +71,22 @@ Traditional programs are instructions a machine blindly executes. The machine do

### Language fosters collaboration: users and agents building together

-The language must be readable by everyone who matters: domain experts who know the business logic, engineers who optimize and deploy it, and crucially, AI agents that can build and refine workflows autonomously.
+The language must be readable by everyone who matters: domain experts who know the business logic, engineers who optimize and deploy it, and crucially, AI agents that can build and refine methods autonomously.

-Imagine agents that transform natural language requirements into working workflows. 
They design each transformation step (or reuse existing ones), test against real or synthetic data, incorporate expert feedback, and iterate to improve quality while reducing costs. Once a workflow is built, agents can invoke it as a reliable tool whenever they need structured, predictable outputs.
+Imagine agents that transform natural language requirements into working methods. They design each transformation step (or reuse existing ones), test against real or synthetic data, incorporate expert feedback, and iterate to improve quality while reducing costs. Once a method is built, agents can invoke it as a reliable tool whenever they need structured, predictable outputs.

-> This is how agents finally remember know-how: by encoding methods into reusable workflows they can build, share, and execute on demand.
+> This is how agents finally remember know-how: by encoding procedures into reusable methods they can build, share, and execute on demand.
> 

## The Know-How Graph: a Network of Composable Methods

-**Breaking complex work into smaller tasks is a recursive, core pattern.** Each workflow should stand on the shoulders of others, composing like LEGO bricks to build increasingly sophisticated cognitive systems.
+**Breaking complex work into smaller tasks is a recursive, core pattern.** Each method should stand on the shoulders of others, composing like LEGO bricks to build increasingly sophisticated cognitive systems.

What emerges is a **Know-How Graph**: not just static knowledge, but executable methods that connect and build upon one another. **Unlike a knowledge graph mapping facts, this maps procedures: the actual know-how of getting cognitive work done.**

**Example:**

-A recruitment workflow doesn't start from scratch. It composes existing workflows:
+A recruitment method doesn't start from scratch. It composes existing methods:

- ExtractCandidateProfile (experience, education, skills…)
- ExtractJobOffer (skills, years of experience…).

@@ -95,23 +95,23 @@ These feed into your custom ScoreCard logic to produce a MatchAnalysis, which tr

Each component can be assigned to different team members and validated independently by the relevant stakeholders.

-> Think of a workflow as a proven route through the work, and the Know-How Graph as the network of all such routes.
+> Think of a method as a proven route through the work, and the Know-How Graph as the network of all such routes.
> 

### Know-how is as shareable as knowledge

-Think about the explosion of prompt sharing since 2023. All those people trading their best ChatGPT prompts on Twitter, GitHub, Reddit, LinkedIn. Now imagine that same viral knowledge sharing, but with complete, tested, composable workflows instead of fragile prompts.
+Think about the explosion of prompt sharing since 2023. All those people trading their best ChatGPT prompts on Twitter, GitHub, Reddit, LinkedIn. Now imagine that same viral knowledge sharing, but with complete, tested, composable methods instead of fragile prompts.

-We've seen this movie: software package managers, SQL views, Docker, dbt packages. Composable standards create ecosystems where everyone's work makes everyone else more productive. Generic workflows for common tasks will spread rapidly, while companies keep their differentiating workflows as competitive advantage. That's how we stop reinventing the wheel while preserving secret sauce.
+We've seen this movie: software package managers, SQL views, Docker, dbt packages. 
Composable standards create ecosystems where everyone's work makes everyone else more productive. Generic methods for common tasks will spread rapidly, while companies keep their differentiating methods as competitive advantage. That's how we stop reinventing the wheel while preserving secret sauce.

-The same principle applies to AI workflows through the Know-How Graph: durable infrastructure that compounds value over time.
+The same principle applies to AI methods through the Know-How Graph: durable infrastructure that compounds value over time.

-> The Know-How Graph will thrive on the open web because workflows are just files: easy to publish, fork, improve, and compose.
+> The Know-How Graph will thrive on the open web because methods are just files: easy to publish, fork, improve, and compose.
> 

### What this unlocks

-- Faster time to production (reuse existing workflows + AI writes them for you)
+- Faster time to production (reuse existing methods + AI writes them for you)
- Lower run costs (optimize price / performance for each task)
- Better collaboration between tech and business
- Better auditability / compliance
@@ -121,26 +121,26 @@ The same principle applies to AI workflows through the Know-How Graph: durable i

[**Pipelex**](https://github.com/Pipelex/pipelex) is our take on this language: open-source (MIT), designed for the Know-How Graph.

-Each workflow is built from pipes: modular transformations that guarantee their output structure while applying intelligence to the content. A pipe is a knowledge transformer with a simple contract: knowledge in → knowledge out., each defined conceptually and with explicit structure and validation. The method is readable and editable by humans and agents.
+Each method is built from pipes: modular transformations that guarantee their output structure while applying intelligence to the content. A pipe is a knowledge transformer with a simple contract: knowledge in → knowledge out, each defined conceptually and with explicit structure and validation. The method is readable and editable by humans and agents.

-Our Pipelex workflow builder is itself a Pipelex workflow. The tooling builds itself.
+Our Pipelex method builder is itself a Pipelex method. The tooling builds itself.

## Why This Can Become a Standard

-Pipelex is MIT-licensed and designed for portability. Workflows are files, based on TOML syntax (itself well standardized), and the outputs are validated JSON.
+Pipelex is MIT-licensed and designed for portability. Methods are files, based on TOML syntax (itself well standardized), and the outputs are validated JSON.

-Early adopters are contributing to the [cookbook repo](https://github.com/Pipelex/pipelex-cookbook/tree/feature/Chicago), building integrations, and running workflows in production. The pieces for ecosystem growth are in place: declarative spec, reference implementation, composable architecture.
+Early adopters are contributing to the [cookbook repo](https://github.com/Pipelex/pipelex-cookbook/tree/feature/Chicago), building integrations, and running methods in production. The pieces for ecosystem growth are in place: declarative spec, reference implementation, composable architecture.

Building a standard is hard. We're at v0.1.0, with versioning and backward compatibility coming next. The spec will evolve with your feedback.

## Join Us

-The most valuable standards are boring infrastructure everyone relies on: SQL, HTTP, JSON. Pipelex aims to be that for AI workflows. 
+The most valuable standards are boring infrastructure everyone relies on: SQL, HTTP, JSON. Pipelex aims to be that for AI methods.

-Start with one workflow: extract invoice data, process applications, analyze reports… Share what works. Build on what others share.
+Start with one method: extract invoice data, process applications, analyze reports… Share what works. Build on what others share.

-**The future of AI needs both:** smarter agents that explore and adapt, AND reliable workflows that execute proven methods at scale. One workflow at a time, let's build the cognitive infrastructure every organization needs.
+**The future of AI needs both:** smarter agents that explore and adapt, AND reliable methods that execute proven know-how at scale. One method at a time, let's build the cognitive infrastructure every organization needs.

---

diff --git a/docs/home/4-cookbook-examples/extract-dpe.md b/docs/home/4-cookbook-examples/extract-dpe.md
index 7df181d3e..edc91c142 100644
--- a/docs/home/4-cookbook-examples/extract-dpe.md
+++ b/docs/home/4-cookbook-examples/extract-dpe.md
@@ -52,7 +52,7 @@ class Dpe(StructuredContent):
    yearly_energy_costs: Optional[float] = None
```

-## The Pipeline Definition: `extract_dpe.plx`
+## The Pipeline Definition: `extract_dpe.mthds`

The pipeline uses a `PipeLLM` with a very specific prompt to extract the information from the document. The combination of the image and the OCR text allows the LLM to accurately capture all the details.

diff --git a/docs/home/4-cookbook-examples/extract-gantt.md b/docs/home/4-cookbook-examples/extract-gantt.md
index 156e8eeee..7ea9043f6 100644
--- a/docs/home/4-cookbook-examples/extract-gantt.md
+++ b/docs/home/4-cookbook-examples/extract-gantt.md
@@ -51,9 +51,9 @@ class GanttChart(StructuredContent):
    milestones: Optional[List[Milestone]]
```

-## The Pipeline Definition: `gantt.plx`
+## The Pipeline Definition: `gantt.mthds`

-The `extract_gantt_by_steps` pipeline is a sequence of smaller, focused pipes. This is a great example of building a complex workflow from simple, reusable components.
+The `extract_gantt_by_steps` pipeline is a sequence of smaller, focused pipes. This is a great example of building a complex method from simple, reusable components.

```toml
[pipe.extract_gantt_by_steps]
@@ -92,7 +92,7 @@ Here is the name of the task you have to extract the dates for:

@gantt_task_name
"""
```

-This demonstrates the "divide and conquer" approach that Pipelex encourages. By breaking down a complex problem into smaller steps, each step can be handled by a specialized pipe, making the overall workflow more robust and easier to debug.
+This demonstrates the "divide and conquer" approach that Pipelex encourages. By breaking down a complex problem into smaller steps, each step can be handled by a specialized pipe, making the overall method more robust and easier to debug.

## Flowchart

diff --git a/docs/home/4-cookbook-examples/extract-generic.md b/docs/home/4-cookbook-examples/extract-generic.md
index e0cf87b1e..519beacca 100644
--- a/docs/home/4-cookbook-examples/extract-generic.md
+++ b/docs/home/4-cookbook-examples/extract-generic.md
@@ -24,7 +24,7 @@ async def extract_generic(pdf_url: str) -> TextAndImagesContent:
    return markdown_and_images
```

-The `merge_markdown_and_images` function is a great example of how you can add your own Python code to a Pipelex workflow to perform custom processing. 
+The `merge_markdown_and_images` function is a great example of how you can add your own Python code to a Pipelex method to perform custom processing. ```python def merge_markdown_and_images(working_memory: WorkingMemory) -> TextAndImagesContent: diff --git a/docs/home/4-cookbook-examples/extract-proof-of-purchase.md b/docs/home/4-cookbook-examples/extract-proof-of-purchase.md index 4faed4ad7..48736f345 100644 --- a/docs/home/4-cookbook-examples/extract-proof-of-purchase.md +++ b/docs/home/4-cookbook-examples/extract-proof-of-purchase.md @@ -48,7 +48,7 @@ class ProofOfPurchase(StructuredContent): ``` This demonstrates how you can create nested data structures to accurately model your data. -## The Pipeline Definition: `extract_proof_of_purchase.plx` +## The Pipeline Definition: `extract_proof_of_purchase.mthds` The pipeline uses a powerful `PipeLLM` to extract the structured data from the document. The prompt is carefully engineered to guide the LLM. diff --git a/docs/home/4-cookbook-examples/extract-table.md b/docs/home/4-cookbook-examples/extract-table.md index 2f963daec..97e9a57a1 100644 --- a/docs/home/4-cookbook-examples/extract-table.md +++ b/docs/home/4-cookbook-examples/extract-table.md @@ -56,7 +56,7 @@ class HtmlTable(StructuredContent): return self ``` -## The Pipeline Definition: `table.plx` +## The Pipeline Definition: `table.mthds` The pipeline uses a two-step "extract and review" pattern. The first pipe does the initial extraction, and the second pipe reviews the generated HTML against the original image to correct any errors. This is a powerful pattern for increasing the reliability of LLM outputs. @@ -88,4 +88,4 @@ Rewrite the entire html table with your potential corrections. Make sure you do not forget any text. """ ``` -This self-correction pattern is a key technique for building robust and reliable AI workflows with Pipelex. \ No newline at end of file +This self-correction pattern is a key technique for building robust and reliable AI methods with Pipelex. \ No newline at end of file diff --git a/docs/home/4-cookbook-examples/hello-world.md b/docs/home/4-cookbook-examples/hello-world.md index b81e1c4aa..6f18a0955 100644 --- a/docs/home/4-cookbook-examples/hello-world.md +++ b/docs/home/4-cookbook-examples/hello-world.md @@ -20,7 +20,7 @@ import asyncio from pipelex import pretty_print from pipelex.pipelex import Pipelex -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner async def hello_world(): @@ -28,9 +28,11 @@ async def hello_world(): This function demonstrates the use of a super simple Pipelex pipeline to generate text. """ # Run the pipe - pipe_output = await execute_pipeline( + runner = PipelexRunner() + response = await runner.execute_pipeline( pipe_code="hello_world", ) + pipe_output = response.pipe_output # Print the output pretty_print(pipe_output, title="Your first Pipelex output") @@ -44,7 +46,7 @@ asyncio.run(hello_world()) This example shows the minimal setup needed to run a Pipelex pipeline: initialize Pipelex, execute a pipeline by its code name, and pretty-print the results. 
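The same runner pattern extends to pipes that take inputs. Here is a minimal sketch assembled from pieces shown elsewhere in this changeset; the pipe code and input name are placeholders, and it assumes Pipelex has already been initialized as in the example above:

```python
from pipelex.pipeline.runner import PipelexRunner


async def run_with_inputs():
    # Placeholder pipe code and input name: swap in your own pipe.
    runner = PipelexRunner()
    response = await runner.execute_pipeline(
        pipe_code="your_pipe_code",
        inputs={"topic": "winter haiku"},
    )
    # The runner wraps the result; the pipe output hangs off the response
    pipe_output = response.pipe_output
    print(pipe_output)
```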
-## The Pipeline Definition: `hello_world.plx` +## The Pipeline Definition: `hello_world.mthds` The pipeline definition is extremely simple - it's a single LLM call that generates a haiku: diff --git a/docs/home/4-cookbook-examples/index.md b/docs/home/4-cookbook-examples/index.md index b17436d70..79704d4d8 100644 --- a/docs/home/4-cookbook-examples/index.md +++ b/docs/home/4-cookbook-examples/index.md @@ -5,7 +5,7 @@ Welcome to the Pipelex Cookbook! [![GitHub](https://img.shields.io/badge/Cookbook-5a0dad?logo=github&logoColor=white&style=flat)](https://github.com/Pipelex/pipelex-cookbook/tree/feature/Chicago) -This is your go-to resource for practical examples and ready-to-use recipes to build powerful and reliable AI workflows with Pipelex. Whether you're a beginner looking to get started or an experienced user searching for advanced patterns, you'll find something useful here. +This is your go-to resource for practical examples and ready-to-use recipes to build powerful and reliable AI methods with Pipelex. Whether you're a beginner looking to get started or an experienced user searching for advanced patterns, you'll find something useful here. ## Philosophy @@ -34,7 +34,7 @@ Here are some of the examples you can find in the cookbook, organized by categor * [**Simple OCR**](./simple-ocr.md): A basic OCR pipeline to extract text from a PDF. * [**Generic Document Extraction**](./extract-generic.md): A powerful pipeline to extract text and images from complex documents. -* [**Invoice Extractor**](./invoice-extractor.md): A complete workflow for processing invoices, including reporting. +* [**Invoice Extractor**](./invoice-extractor.md): A complete method for processing invoices, including reporting. * [**Proof of Purchase Extraction**](./extract-proof-of-purchase.md): A targeted pipeline for extracting data from receipts. ### Graphical Extraction diff --git a/docs/home/4-cookbook-examples/invoice-extractor.md b/docs/home/4-cookbook-examples/invoice-extractor.md index 8dc82644c..186266061 100644 --- a/docs/home/4-cookbook-examples/invoice-extractor.md +++ b/docs/home/4-cookbook-examples/invoice-extractor.md @@ -9,7 +9,7 @@ This example provides a comprehensive pipeline for processing invoices. It takes ## The Pipeline Explained -The `process_invoice` pipeline is a complete workflow for invoice processing. +The `process_invoice` pipeline is a complete method for invoice processing. ```python async def process_invoice(pdf_url: str) -> ListContent[Invoice]: @@ -51,9 +51,9 @@ class Invoice(StructuredContent): # ... other fields ``` -## The Pipeline Definition: `invoice.plx` +## The Pipeline Definition: `invoice.mthds` -The entire workflow is defined in a PLX file. This declarative approach makes the pipeline easy to understand and modify. Here's a snippet from `invoice.plx`: +The entire method is defined in a MTHDS file. This declarative approach makes the pipeline easy to understand and modify. Here's a snippet from `invoice.mthds`: ```toml [pipe.process_invoice] @@ -89,7 +89,7 @@ The category of this invoice is: $invoice_details.category. """ ``` -This shows how a complex workflow, including text extraction with `PipeExtract` and LLM calls, can be defined in a simple, readable format. The `model = "$engineering-structured"` line is particularly powerful, as it tells the LLM to structure its output according to the `Invoice` model. +This shows how a complex method, including text extraction with `PipeExtract` and LLM calls, can be defined in a simple, readable format. 
The `model = "$engineering-structured"` line is particularly powerful, as it tells the LLM to structure its output according to the `Invoice` model.

## The Pipeline Flowchart

diff --git a/docs/home/4-cookbook-examples/simple-ocr.md b/docs/home/4-cookbook-examples/simple-ocr.md
index bccfa51cd..58f4633a7 100644
--- a/docs/home/4-cookbook-examples/simple-ocr.md
+++ b/docs/home/4-cookbook-examples/simple-ocr.md
@@ -2,7 +2,7 @@

This example demonstrates a basic OCR (Optical Character Recognition) pipeline. It takes a PDF file as input, extracts the text from each page, and saves the content.

-This is a fundamental building block for many document processing workflows.
+This is a fundamental building block for many document processing methods.

## Get the code

diff --git a/docs/home/4-cookbook-examples/write-tweet.md b/docs/home/4-cookbook-examples/write-tweet.md
index a3454a708..1825cd2c5 100644
--- a/docs/home/4-cookbook-examples/write-tweet.md
+++ b/docs/home/4-cookbook-examples/write-tweet.md
@@ -36,7 +36,7 @@ This example shows how to use multiple inputs to guide the generation process an

## The Data Structure: `OptimizedTweet` Model

-The data model for this pipeline is very simple, as the final output is just a piece of text. However, the pipeline uses several concepts internally to manage the workflow, such as `DraftTweet`, `TweetAnalysis`, and `WritingStyle`.
+The data model for this pipeline is very simple, as the final output is just a piece of text. However, the pipeline uses several concepts internally to manage the flow, such as `DraftTweet`, `TweetAnalysis`, and `WritingStyle`.

```python
class OptimizedTweet(TextContent):
@@ -44,7 +44,7 @@ class OptimizedTweet(TextContent):
    pass
```

-## The Pipeline Definition: `tech_tweet.plx`
+## The Pipeline Definition: `tech_tweet.mthds`

This pipeline uses a two-step "analyze and optimize" sequence. The first pipe analyzes the draft tweet for common pitfalls, and the second pipe rewrites the tweet based on the analysis and a provided writing style. This is a powerful pattern for refining generated content.

@@ -82,7 +82,7 @@ Evaluate the tweet for these key issues:

@draft_tweet
"""
```

-This "analyze and refine" pattern is a great way to build more reliable and sophisticated text generation workflows. The first step provides a structured critique, and the second step uses that critique to improve the final output.
+This "analyze and refine" pattern is a great way to build more reliable and sophisticated text generation methods. The first step provides a structured critique, and the second step uses that critique to improve the final output.

Here is the flowchart generated during this run:

diff --git a/docs/home/5-setup/configure-ai-providers.md b/docs/home/5-setup/configure-ai-providers.md
index 881648662..8fa346266 100644
--- a/docs/home/5-setup/configure-ai-providers.md
+++ b/docs/home/5-setup/configure-ai-providers.md
@@ -173,10 +173,10 @@ Learn more in our [Inference Backend Configuration](../../home/7-configuration/c

Now that you have your backend configured:

1. **Organize your project**: [Project Organization](./project-organization.md)
-2. **Learn the concepts**: [Writing Workflows Tutorial](../../home/2-get-started/pipe-builder.md)
+2. **Learn the concepts**: [Writing Methods Tutorial](../../home/2-get-started/pipe-builder.md)
3. **Explore examples**: [Cookbook Repository](https://github.com/Pipelex/pipelex-cookbook/tree/feature/Chicago)
-4. 
**Deep dive**: [Build Reliable AI Workflows](../../home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md)
+4. **Deep dive**: [Build Reliable AI Methods](../../home/6-build-reliable-ai-workflows/kick-off-a-methods-project.md)

!!! tip "Advanced Configuration"
    For detailed backend configuration options, see [Inference Backend Configuration](../../home/7-configuration/config-technical/inference-backend-config.md).

diff --git a/docs/home/5-setup/index.md b/docs/home/5-setup/index.md
index 61a3cc0b7..2051dd126 100644
--- a/docs/home/5-setup/index.md
+++ b/docs/home/5-setup/index.md
@@ -12,7 +12,7 @@ If you already have a project running and want to tune behavior, jump to [Config

## Quick guide

- **Need to run pipelines with LLMs?** Start with [Configure AI Providers](./configure-ai-providers.md).
-- **Need a recommended repo layout for `.plx` and Python code?** See [Project Organization](./project-organization.md).
+- **Need a recommended repo layout for `.mthds` and Python code?** See [Project Organization](./project-organization.md).
- **Need to understand telemetry and privacy trade-offs?** See [Telemetry](./telemetry.md).
- **Ready to tune the knobs?** Go to [Configuration Overview](../7-configuration/index.md).

diff --git a/docs/home/5-setup/project-organization.md b/docs/home/5-setup/project-organization.md
index d62e3bd72..12ec2dd90 100644
--- a/docs/home/5-setup/project-organization.md
+++ b/docs/home/5-setup/project-organization.md
@@ -2,20 +2,21 @@

## Overview

-Pipelex automatically discovers `.plx` pipeline files anywhere in your project (excluding `.venv`, `.git`, `node_modules`, etc.).
+Pipelex automatically discovers `.mthds` pipeline files anywhere in your project (excluding `.venv`, `.git`, `node_modules`, etc.).

## Recommended: Keep pipelines with related code

```bash
your_project/
-├── my_project/              # Your Python package
+├── METHODS.toml             # Package manifest (optional)
+├── my_project/              # Your Python package
│   ├── finance/
│   │   ├── services.py
-│   │   ├── invoices.plx     # Pipeline with finance code
+│   │   ├── invoices.mthds   # Pipeline with finance code
│   │   └── invoices_struct.py  # Structure classes
│   └── legal/
│       ├── services.py
-│       ├── contracts.plx    # Pipeline with legal code
+│       ├── contracts.mthds  # Pipeline with legal code
│       └── contracts_struct.py
├── .pipelex/                # Config at repo root
│   └── pipelex.toml
@@ -23,19 +24,21 @@ your_project/
└── requirements.txt
```

+- **Package manifest**: `METHODS.toml` at your project root declares package identity and pipe visibility. See [Packages](../6-build-reliable-ai-workflows/packages.md) for details.
+
## Alternative: Centralize pipelines

```bash
your_project/
├── pipelines/
-│   ├── invoices.plx
-│   ├── contracts.plx
+│   ├── invoices.mthds
+│   ├── contracts.mthds
│   └── structures.py
└── .pipelex/
    └── pipelex.toml
```

-Learn more in our [Project Structure documentation](../../home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md).
+Learn more in our [Project Structure documentation](../../home/6-build-reliable-ai-workflows/kick-off-a-methods-project.md).

---

@@ -51,8 +54,8 @@ Learn more in our [Project Structure documentation](../../home/6-build-reliable-

Now that you understand project organization:

1. **Start building**: [Get Started](../../home/2-get-started/pipe-builder.md)
-2. **Learn the concepts**: [Writing Workflows Tutorial](../../home/2-get-started/pipe-builder.md)
+2. 
**Learn the concepts**: [Writing Methods Tutorial](../../home/2-get-started/pipe-builder.md)
3. **Explore examples**: [Cookbook Repository](https://github.com/Pipelex/pipelex-cookbook/tree/feature/Chicago)
-4. **Deep dive**: [Build Reliable AI Workflows](../../home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md)
+4. **Deep dive**: [Build Reliable AI Methods](../../home/6-build-reliable-ai-workflows/kick-off-a-methods-project.md)

diff --git a/docs/home/6-build-reliable-ai-workflows/concepts/define_your_concepts.md b/docs/home/6-build-reliable-ai-workflows/concepts/define_your_concepts.md
index f2f4270c9..c1fc9447f 100644
--- a/docs/home/6-build-reliable-ai-workflows/concepts/define_your_concepts.md
+++ b/docs/home/6-build-reliable-ai-workflows/concepts/define_your_concepts.md
@@ -1,6 +1,6 @@
# Defining Your Concepts

-Concepts are the foundation of reliable AI workflows. They define what flows through your pipes: not just data types, but meaningful pieces of knowledge with clear boundaries and validation rules.
+Concepts are the foundation of reliable AI methods. They define what flows through your pipes: not just data types, but meaningful pieces of knowledge with clear boundaries and validation rules.

## Writing Concept Definitions

@@ -72,7 +72,7 @@ Those concepts will be Text-based by default. If you want to use structured outp

Group concepts that naturally belong together in the same domain. A domain acts as a namespace for a set of related concepts and pipes, helping you organize and reuse your pipeline components. You can learn more about them in [Understanding Domains](../domain.md).

```toml
-# finance.plx
+# finance.mthds
domain = "finance"
description = "Financial document processing"
@@ -86,7 +86,7 @@ LineItem = "An individual item or service listed in a financial document"

## Get Started with Inline Structures

-To add structure to your concepts, the **recommended approach** is using **inline structures** directly in your `.plx` files. Inline structures support all field types including nested concepts:
+To add structure to your concepts, the **recommended approach** is using **inline structures** directly in your `.mthds` files. Inline structures support all field types including nested concepts:

```toml
[concept.Customer]
diff --git a/docs/home/6-build-reliable-ai-workflows/concepts/inline-structures.md b/docs/home/6-build-reliable-ai-workflows/concepts/inline-structures.md
index 06f0025a1..7d82053f8 100644
--- a/docs/home/6-build-reliable-ai-workflows/concepts/inline-structures.md
+++ b/docs/home/6-build-reliable-ai-workflows/concepts/inline-structures.md
@@ -1,6 +1,6 @@
# Inline Structure Definition

-Define structured concepts directly in your `.plx` files using pipelex syntax. This is the **recommended approach** for most use cases, offering rapid development without Python boilerplate.
+Define structured concepts directly in your `.mthds` files using pipelex syntax. This is the **recommended approach** for most use cases, offering rapid development without Python boilerplate.

For an introduction to concepts themselves, see [Define Your Concepts](define_your_concepts.md). For advanced features requiring Python classes, see [Python StructuredContent Classes](python-classes.md). 
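To give a rough sense of where inline structures lead, here is the kind of Python class that structure generation could emit for an inline `Customer` concept. The fields and the import path are assumptions made for this sketch, not actual generator output:

```python
# Hypothetical generated class for an inline Customer concept; the field
# names and the import path below are assumptions, for illustration only.
from typing import Optional

from pipelex.core.stuff_content import StructuredContent  # assumed path


class Customer(StructuredContent):
    """A customer, mirroring an inline TOML structure definition."""

    name: str
    email: str
    age: Optional[int] = None
```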
@@ -246,11 +246,11 @@ The `pipelex build structures` command generates Python classes from your inline

### Usage

```bash
-# Generate from a directory of .plx files
+# Generate from a directory of .mthds files
pipelex build structures ./my_pipelines/

-# Generate from a specific .plx file
-pipelex build structures ./my_pipeline/bundle.plx
+# Generate from a specific .mthds file
+pipelex build structures ./my_pipeline/bundle.mthds

# Specify output directory
pipelex build structures ./my_pipelines/ -o ./generated/
@@ -306,5 +306,5 @@ See [Python StructuredContent Classes](python-classes.md) for advanced features.

- [Define Your Concepts](define_your_concepts.md) - Learn about concept semantics and naming
- [Python StructuredContent Classes](python-classes.md) - Advanced features with Python
-- [Writing Workflows Tutorial](../../2-get-started/pipe-builder.md) - Get started with structured outputs
+- [Writing Methods Tutorial](../../2-get-started/pipe-builder.md) - Get started with structured outputs

diff --git a/docs/home/6-build-reliable-ai-workflows/concepts/native-concepts.md b/docs/home/6-build-reliable-ai-workflows/concepts/native-concepts.md
index 98515c181..9ba73cf3b 100644
--- a/docs/home/6-build-reliable-ai-workflows/concepts/native-concepts.md
+++ b/docs/home/6-build-reliable-ai-workflows/concepts/native-concepts.md
@@ -1,12 +1,12 @@
# Native Concepts

-Pipelex includes several built-in native concepts that cover common data types in AI workflows. These concepts come with predefined structures and are automatically available in all pipelines (no setup required).
+Pipelex includes several built-in native concepts that cover common data types in AI methods. These concepts come with predefined structures and are automatically available in all pipelines (no setup required).

For an introduction to concepts, see [Define Your Concepts](define_your_concepts.md).

## What Are Native Concepts?

-Native concepts are ready-to-use building blocks for AI workflows. They represent common data types you'll frequently work with: text, images, documents, numbers, and combinations thereof.
+Native concepts are ready-to-use building blocks for AI methods. They represent common data types you'll frequently work with: text, images, documents, numbers, and combinations thereof.

**Key characteristics:**

@@ -133,7 +133,7 @@ class DynamicContent(StuffContent):
    pass
```

-**Use for:** Workflows where the content structure isn't known in advance.
+**Use for:** Methods where the content structure isn't known in advance.

### JSONContent

@@ -189,7 +189,7 @@ output = "Page"

This extracts each page with both its text/images and a visual representation. 
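To make that concrete, here is a hedged sketch of running such a pipe and inspecting the per-page output. The pipe code and input name are hypothetical stand-ins, and `pretty_print` is the helper already used in the cookbook examples:

```python
from pipelex import pretty_print
from pipelex.pipeline.runner import PipelexRunner


async def show_pages(pdf_url: str) -> None:
    # "extract_pages" and the "document" input name are hypothetical,
    # standing in for a pipe like the one above whose output is "Page".
    runner = PipelexRunner()
    response = await runner.execute_pipeline(
        pipe_code="extract_pages",
        inputs={"document": pdf_url},
    )
    pretty_print(response.pipe_output, title="One Page item per PDF page")
```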

-### In Complex Workflows
+### In Complex Methods

```toml
[pipe.create_report]
@@ -223,7 +223,7 @@ Refine native concepts when:

- ✅ You need semantic specificity (e.g., `Invoice` vs `Document`)
- ✅ You want to add custom structure on top of the base structure
-- ✅ Building domain-specific workflows
+- ✅ Building domain-specific methods
- ✅ Need type safety for specific document types

## Common Patterns

@@ -286,5 +286,5 @@ Analyze this image: $image"

- [Define Your Concepts](define_your_concepts.md) - Learn about concept semantics
- [Inline Structures](inline-structures.md) - Add structure to refined concepts
- [Python StructuredContent Classes](python-classes.md) - Advanced customization
-- [Writing Workflows Tutorial](../../2-get-started/pipe-builder.md) - Use native concepts in pipelines
+- [Writing Methods Tutorial](../../2-get-started/pipe-builder.md) - Use native concepts in pipelines

diff --git a/docs/home/6-build-reliable-ai-workflows/concepts/python-classes.md b/docs/home/6-build-reliable-ai-workflows/concepts/python-classes.md
index c2d46a837..dc19439c7 100644
--- a/docs/home/6-build-reliable-ai-workflows/concepts/python-classes.md
+++ b/docs/home/6-build-reliable-ai-workflows/concepts/python-classes.md
@@ -122,7 +122,7 @@ age = { type = "integer", description = "User's age", required = false }

**Step 2: Generate the base class**

```bash
-pipelex build structures ./my_pipeline.plx -o ./structures/
+pipelex build structures ./my_pipeline.mthds -o ./structures/
```

**Step 3: Add custom validation**

@@ -151,7 +151,7 @@ class UserProfile(StructuredContent):
        return v
```

-**Step 4: Update your .plx file**
+**Step 4: Update your .mthds file**

```toml
[concept]
@@ -184,7 +184,7 @@ in_stock = { type = "boolean", description = "Stock availability", default_value

**2. Generate the Python class:**

```bash
-pipelex build structures ./ecommerce.plx -o ./structures/
+pipelex build structures ./ecommerce.mthds -o ./structures/
```

**3. Add your custom logic** to the generated file:

@@ -217,7 +217,7 @@ class Product(StructuredContent):
        return f"${self.price:.2f}"
```

-**4. Update your `.plx` file:**
+**4. Update your `.mthds` file:**

```toml
domain = "ecommerce"
@@ -255,5 +255,5 @@ Product = "A product in the catalog"

- [Inline Structures](inline-structures.md) - Fast prototyping with TOML
- [Define Your Concepts](define_your_concepts.md) - Learn about concept semantics and naming
-- [Writing Workflows Tutorial](../../2-get-started/pipe-builder.md) - Get started with structured outputs
+- [Writing Methods Tutorial](../../2-get-started/pipe-builder.md) - Get started with structured outputs

diff --git a/docs/home/6-build-reliable-ai-workflows/concepts/refining-concepts.md b/docs/home/6-build-reliable-ai-workflows/concepts/refining-concepts.md
index 6412e8d1c..de5cf0021 100644
--- a/docs/home/6-build-reliable-ai-workflows/concepts/refining-concepts.md
+++ b/docs/home/6-build-reliable-ai-workflows/concepts/refining-concepts.md
@@ -1,6 +1,6 @@
# Refining Concepts

-Concept refinement allows you to create more specific versions of existing concepts while inheriting their structure. This provides semantic clarity and type safety for domain-specific workflows.
+Concept refinement allows you to create more specific versions of existing concepts while inheriting their structure. This provides semantic clarity and type safety for domain-specific methods.

## What is Concept Refinement? 
@@ -37,7 +37,7 @@ inputs = { contract = "Contract" } # Clear what type of document is expected output = "ContractTerms" ``` -### 3. Domain-Specific Workflows +### 3. Domain-Specific Methods Build pipelines tailored to specific use cases: @@ -184,6 +184,92 @@ refines = "Customer" Both `VIPCustomer` and `InactiveCustomer` will have access to the `name` and `email` fields defined in `Customer`. When you create content for these concepts, it will be compatible with the base `Customer` structure. +## Cross-Package Refinement + +You can refine concepts that live in a different package. This lets you specialize a shared concept from a dependency without modifying the dependency itself. + +### Syntax + +Use the `->` cross-package reference operator in the `refines` field: + +```toml +[concept.RefinedConcept] +description = "A more specialized version of a cross-package concept" +refines = "alias->domain.BaseConceptCode" +``` + +| Part | Description | +|------|-------------| +| `alias` | The dependency alias declared in your `METHODS.toml` `[dependencies]` section | +| `->` | Cross-package reference operator | +| `domain` | The dot-separated domain path inside the dependency package | +| `BaseConceptCode` | The `PascalCase` concept code to refine | + +### Full Example + +Suppose you depend on a scoring library that defines a `WeightedScore` concept: + +**Dependency package** (`scoring-lib`): + +```toml title="METHODS.toml" +[package] +address = "github.com/acme/scoring-lib" +version = "2.0.0" +description = "Scoring utilities." + +[exports.scoring] +pipes = ["compute_weighted_score"] +``` + +```toml title="scoring.mthds" +domain = "scoring" + +[concept.WeightedScore] +description = "A weighted score result" + +[pipe.compute_weighted_score] +type = "PipeLLM" +description = "Compute a weighted score" +output = "WeightedScore" +prompt = "Compute a weighted score for: {{ item }}" +``` + +**Your consumer package**: + +```toml title="METHODS.toml" +[package] +address = "github.com/acme/analysis-app" +version = "1.0.0" +description = "Analysis application." + +[dependencies] +scoring_lib = { address = "github.com/acme/scoring-lib", version = "^2.0.0" } + +[exports.analysis] +pipes = ["compute_detailed_score"] +``` + +```toml title="analysis.mthds" +domain = "analysis" + +[concept.DetailedScore] +description = "An extended score with additional detail" +refines = "scoring_lib->scoring.WeightedScore" + +[pipe.compute_detailed_score] +type = "PipeLLM" +description = "Compute a detailed score" +output = "DetailedScore" +prompt = "Compute a detailed score for: {{ item }}" +``` + +`DetailedScore` inherits the structure of `WeightedScore` from the `scoring_lib` dependency's `scoring` domain. + +!!! important + The base concept must be accessible from the dependency. The dependency must export the pipes in the domain that contains the concept, or the concept's domain must be reachable via an exported pipe's bundle. + +For more on how dependencies and cross-package references work, see [Packages](../packages.md#cross-package-references). + ## Type Compatibility Understanding how refined concepts interact with pipe inputs is crucial. 
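One aside on the cross-package syntax introduced above: the `->` grammar is regular enough to validate mechanically. The following standalone sketch (ordinary Python, not Pipelex's own parser) checks the `alias->domain.Code` shape and the casing rules described in the tables:

```python
import re

# Standalone validator for the `alias->domain.code` reference shape:
# snake_case alias, dotted snake_case domain path, then a PascalCase
# concept code or snake_case pipe code. Illustration only.
SNAKE = r"[a-z][a-z0-9_]*"
PASCAL = r"[A-Z][A-Za-z0-9]*"
CROSS_REF = re.compile(
    rf"^(?P<alias>{SNAKE})->(?P<domain>{SNAKE}(?:\.{SNAKE})*)\.(?P<code>{PASCAL}|{SNAKE})$"
)


def parse_cross_ref(ref: str) -> dict[str, str]:
    """Split a cross-package reference into alias, domain path, and code."""
    match = CROSS_REF.match(ref)
    if not match:
        raise ValueError(f"Invalid cross-package reference: {ref!r}")
    return match.groupdict()


print(parse_cross_ref("scoring_lib->scoring.WeightedScore"))
# -> {'alias': 'scoring_lib', 'domain': 'scoring', 'code': 'WeightedScore'}
```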
@@ -287,7 +373,7 @@ refines = "Document"

- ✅ Your concept is semantically a specific type of an existing concept
- ✅ The base concept's structure is sufficient for your needs
- ✅ You want to inherit existing validation and behavior
-- ✅ You're building domain-specific workflows with clear document/content types
+- ✅ You're building domain-specific methods with clear document/content types
- ✅ You need to create specialized versions of an existing concept

**Examples:**

@@ -312,4 +398,5 @@ refines = "Customer"

- [Native Concepts](native-concepts.md) - Complete guide to native concepts
- [Inline Structures](inline-structures.md) - Add structure to concepts
- [Python StructuredContent Classes](python-classes.md) - Advanced customization
+- [Packages](../packages.md) - Package system, dependencies, and cross-package references

diff --git a/docs/home/6-build-reliable-ai-workflows/configure-ai-llm-to-optimize-workflows.md b/docs/home/6-build-reliable-ai-workflows/configure-ai-llm-to-optimize-methods.md
similarity index 100%
rename from docs/home/6-build-reliable-ai-workflows/configure-ai-llm-to-optimize-workflows.md
rename to docs/home/6-build-reliable-ai-workflows/configure-ai-llm-to-optimize-methods.md
diff --git a/docs/home/6-build-reliable-ai-workflows/domain.md b/docs/home/6-build-reliable-ai-workflows/domain.md
index 93b86d62c..8482bd477 100644
--- a/docs/home/6-build-reliable-ai-workflows/domain.md
+++ b/docs/home/6-build-reliable-ai-workflows/domain.md
@@ -1,6 +1,6 @@
# Understanding Domains

-A domain in Pipelex is a **semantic namespace** that organizes related concepts and pipes. It's declared at the top of every `.plx` file and serves as an identifier for grouping related functionality.
+A domain in Pipelex is a **semantic namespace** that organizes related concepts and pipes. It's declared at the top of every `.mthds` file and serves as an identifier for grouping related functionality.

## What is a Domain?

@@ -12,7 +12,7 @@ A domain is defined by three properties:

## Declaring a Domain

-Every `.plx` file must declare its domain at the beginning:
+Every `.mthds` file must declare its domain at the beginning:

```toml
domain = "invoice_processing"
@@ -39,6 +39,37 @@ system_prompt = "You are an expert in financial document analysis and invoice pr

❌ domain = "invoiceProcessing"  # camelCase not allowed
```

+## Hierarchical Domains
+
+Domains support **dotted paths** to express a hierarchy:
+
+```toml
+domain = "legal"
+domain = "legal.contracts"
+domain = "legal.contracts.shareholder"
+```
+
+Each segment must be `snake_case`. The hierarchy is organizational: there is no scope inheritance between parent and child domains. `legal.contracts` and `legal` are independent namespaces; defining concepts in one does not affect the other.
+
+**Valid hierarchical domains:**
+
+```toml
+✅ domain = "legal.contracts"
+✅ domain = "legal.contracts.shareholder"
+✅ domain = "finance.reporting"
+```
+
+**Invalid hierarchical domains:**
+
+```toml
+❌ domain = ".legal"            # Cannot start with a dot
+❌ domain = "legal."            # Cannot end with a dot
+❌ domain = "legal..contracts"  # No consecutive dots
+❌ domain = "Legal.Contracts"   # Segments must be snake_case
+```
+
+Hierarchical domains are used in the `[exports]` section of `METHODS.toml` to control pipe visibility across domains. See [Packages](./packages.md) for details.
+
## How Domains Work

### Concept Namespacing

@@ -68,14 +99,14 @@ This creates two concepts:

The domain code prevents naming conflicts. 
Multiple bundles can define concepts with the same name if they're in different domains: ```toml -# finance.plx +# finance.mthds domain = "finance" [concept] Report = "A financial report" ``` ```toml -# marketing.plx +# marketing.mthds domain = "marketing" [concept] Report = "A marketing campaign report" @@ -85,17 +116,17 @@ Result: Two different concepts (`finance.Report` and `marketing.Report`) with no ### Multiple Bundles, Same Domain -Multiple `.plx` files can declare the same domain. They all contribute to that domain's namespace: +Multiple `.mthds` files can declare the same domain. They all contribute to that domain's namespace: ```toml -# finance_invoices.plx +# finance_invoices.mthds domain = "finance" [concept] Invoice = "..." ``` ```toml -# finance_payments.plx +# finance_payments.mthds domain = "finance" [concept] Payment = "..." @@ -170,7 +201,8 @@ Individual pipes can override the domain system prompt by defining their own `sy ## Related Documentation +- [Packages](./packages.md) - Controlling pipe visibility with exports - [Pipelex Bundle Specification](./pipelex-bundle-specification.md) - How domains are declared in bundles -- [Kick off a Pipelex Workflow Project](./kick-off-a-pipelex-workflow-project.md) - Getting started +- [Kick off a Pipelex Method Project](./kick-off-a-methods-project.md) - Getting started - [Define Your Concepts](./concepts/define_your_concepts.md) - Creating concepts within domains - [Designing Pipelines](./pipes/index.md) - Building pipes within domains diff --git a/docs/home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md b/docs/home/6-build-reliable-ai-workflows/kick-off-a-methods-project.md similarity index 82% rename from docs/home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md rename to docs/home/6-build-reliable-ai-workflows/kick-off-a-methods-project.md index f5e4d368b..2c0e8c32a 100644 --- a/docs/home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md +++ b/docs/home/6-build-reliable-ai-workflows/kick-off-a-methods-project.md @@ -1,10 +1,10 @@ -# Kicking off a Pipelex Workflow Project +# Kicking off a Pipelex Method Project ## Creating Your First Pipeline -A pipeline in Pipelex is a collection of related concepts and pipes. Start by creating a PLX file in your project: +A pipeline in Pipelex is a collection of related concepts and pipes. 
Start by creating a MTHDS file in your project:

-`tutorial.plx`
+`tutorial.mthds`
```toml
domain = "tutorial"
description = "My first Pipelex library"
@@ -48,20 +48,20 @@ See more about domains in [Understanding Domains](./domain.md)

Consistent naming makes your pipeline code discoverable and maintainable:

-### PLX Files
-- Use lowercase with underscores: `legal_contracts.plx`, `customer_service.plx`
-- Match the domain name when possible: domain "legal" → `legal.plx`
-- For multi-word domains, use underscores: domain "customer_service" → `customer_service.plx`
+### MTHDS Files
+- Use lowercase with underscores: `legal_contracts.mthds`, `customer_service.mthds`
+- Match the domain name when possible: domain "legal" → `legal.mthds`
+- For multi-word domains, use underscores: domain "customer_service" → `customer_service.mthds`

See more about pipelex bundle specification in [Pipelex Bundle Specification](./pipelex-bundle-specification.md)

### Python Model Files
-- It is recommended to name structure files with a `_struct.py` suffix: `legal.plx` → `legal_struct.py`
+- It is recommended to name structure files with a `_struct.py` suffix: `legal.mthds` → `legal_struct.py`
- Pipelex will automatically discover and load structure classes from all Python files in your project (excluding common directories like `.venv`, `.git`, etc.)

## Project Structure

-**Key principle:** Put `.plx` files where they belong in YOUR codebase. Pipelex automatically finds them.
+**Key principle:** Put `.mthds` files where they belong in YOUR codebase. Pipelex automatically finds them.

### Recommended Patterns

@@ -72,11 +72,11 @@ your-project/
│   ├── finance/
│   │   ├── models.py
│   │   ├── services.py
-│   │   ├── invoices.plx     # Pipeline with finance code
+│   │   ├── invoices.mthds   # Pipeline with finance code
│   │   └── invoices_struct.py  # Structure classes
│   └── legal/
│       ├── models.py
-│       ├── contracts.plx    # Pipeline with legal code
+│       ├── contracts.mthds  # Pipeline with legal code
│       └── contracts_struct.py
├── .pipelex/                # Config at repo root
│   ├── pipelex.toml
@@ -89,9 +89,9 @@ your-project/
├── my_project/
│   ├── pipelines/           # All pipelines together
-│   │   ├── finance.plx
+│   │   ├── finance.mthds
│   │   ├── finance_struct.py
-│   │   ├── legal.plx
+│   │   ├── legal.mthds
│   │   └── legal_struct.py
│   └── core/
│       └── (your code)
@@ -102,7 +102,7 @@ your-project/
```
your-project/
├── my_project/
-│   ├── invoice_pipeline.plx
+│   ├── invoice_pipeline.mthds
│   ├── invoice_struct.py
│   └── main.py
└── .pipelex/
@@ -110,7 +110,7 @@

### Key Points

-- **Flexible placement**: `.plx` files work anywhere in your project
+- **Flexible placement**: `.mthds` files work anywhere in your project
- **Automatic discovery**: Pipelex scans and finds them automatically
- **Configuration location**: `.pipelex/` stays at repository root
- **Naming convention**: Use `_struct.py` suffix for structure files
diff --git a/docs/home/6-build-reliable-ai-workflows/libraries.md b/docs/home/6-build-reliable-ai-workflows/libraries.md
index 87d980035..794d30d26 100644
--- a/docs/home/6-build-reliable-ai-workflows/libraries.md
+++ b/docs/home/6-build-reliable-ai-workflows/libraries.md
@@ -10,7 +10,7 @@ A Library is composed of three core components:

- **ConceptLibrary**: Manages all concept definitions across domains
- **PipeLibrary**: Manages all pipe 
definitions -These three components together form what we call a **Pipelex Bundle** (the content you define in `.plx` files). Learn more about bundle structure and syntax in the [Pipelex Bundle Specification](./pipelex-bundle-specification.md). +These three components together form what we call a **Pipelex Bundle** (the content you define in `.mthds` files). Learn more about bundle structure and syntax in the [Pipelex Bundle Specification](./pipelex-bundle-specification.md). ## Understanding Library Scope @@ -18,7 +18,7 @@ When you execute pipelines using `execute_pipeline` or `start_pipeline`, a libra - Contains the pipes and concepts available for execution - Provides isolation between different pipeline runs when using different library IDs -- Can be loaded from local directories or from PLX content strings +- Can be loaded from local directories or from MTHDS content strings ## Uniqueness Rules @@ -41,7 +41,7 @@ Libraries enforce specific uniqueness constraints to maintain consistency: Currently, all libraries are **local**, meaning they are loaded from: - Directories on your filesystem (using `library_dirs` parameter) -- PLX content strings (using `plx_content` parameter) +- MTHDS content strings (using `mthds_content` parameter) - The current working directory (default behavior) ```python @@ -90,7 +90,7 @@ The library is populated based on the parameters you provide: **Option A: Loading from directories** ```python -# Loads all .plx files from specified directories +# Loads all .mthds files from specified directories pipe_output = await execute_pipeline( pipe_code="my_pipe", library_dirs=["./pipelines"], @@ -98,11 +98,11 @@ pipe_output = await execute_pipeline( ) ``` -**Option B: Loading from PLX content** +**Option B: Loading from MTHDS content** ```python -# Loads only the provided PLX content -plx_content = """ +# Loads only the provided MTHDS content +mthds_content = """ domain = "marketing" [concept] @@ -116,7 +116,7 @@ prompt = "Generate a tagline for: @desc" """ pipe_output = await execute_pipeline( - plx_content=plx_content, + mthds_content=mthds_content, pipe_code="my_pipe", inputs={...}, ) @@ -165,16 +165,16 @@ pipe_output = await execute_pipeline( ) ``` -### 2. Use PLX Content for Dynamic Pipelines +### 2. 
Use MTHDS Content for Dynamic Pipelines -When generating or modifying pipelines dynamically, use `plx_content`: +When generating or modifying pipelines dynamically, use `mthds_content`: ```python -# Generate PLX content dynamically -plx_content = generate_custom_pipeline(user_requirements) +# Generate MTHDS content dynamically +mthds_content = generate_custom_pipeline(user_requirements) pipe_output = await execute_pipeline( - plx_content=plx_content, + mthds_content=mthds_content, inputs={...}, ) ``` @@ -208,7 +208,7 @@ output2 = await execute_pipeline( ## Related Documentation - [Executing Pipelines](pipes/executing-pipelines.md) - Learn how to execute pipelines with different library configurations -- [Pipelex Bundle Specification](./pipelex-bundle-specification.md) - Understand the structure of PLX files +- [Pipelex Bundle Specification](./pipelex-bundle-specification.md) - Understand the structure of MTHDS files - [Domains](./domain.md) - Learn about organizing pipes into domains - [Concepts](./concepts/define_your_concepts.md) - Understand how concepts work within libraries diff --git a/docs/home/6-build-reliable-ai-workflows/packages.md b/docs/home/6-build-reliable-ai-workflows/packages.md new file mode 100644 index 000000000..ef6ff7dfb --- /dev/null +++ b/docs/home/6-build-reliable-ai-workflows/packages.md @@ -0,0 +1,474 @@ +# Packages + +A **package** is a self-contained collection of `.mthds` bundles with a `METHODS.toml` manifest at the root. The manifest gives your project an identity, declares dependencies on other packages, and controls which pipes are visible to the outside world. + +## What is a Package? + +A package groups related bundles under a single manifest that provides: + +- **Identity** β€” a unique address and semantic version for your project +- **Dependency declarations** β€” references to other packages your pipes rely on +- **Visibility control** β€” fine-grained exports that determine which pipes other domains can reference + +!!! info "Backward Compatibility" + If your project has no `METHODS.toml`, everything works exactly as before β€” all pipes are treated as public. The manifest is entirely opt-in. + +## The Package Manifest: `METHODS.toml` + +Place a `METHODS.toml` file at the root of your project (next to your `.mthds` files or their parent directories). Here is a fully annotated example: + +```toml +[package] +address = "github.com/acme/legal-tools" +version = "1.0.0" +description = "Legal document analysis and contract review methods." +authors = ["Acme Corp"] +license = "MIT" +mthds_version = ">=0.5.0" + +[dependencies] +scoring_lib = { address = "github.com/acme/scoring-lib", version = "^2.0.0" } + +[exports.legal.contracts] +pipes = ["extract_clause", "analyze_contract"] + +[exports.scoring] +pipes = ["compute_weighted_score"] +``` + +### Field Reference + +| Field | Required | Description | +|-------|----------|-------------| +| `address` | Yes | Package address following a hostname/path pattern (e.g. `github.com/org/repo`) | +| `version` | Yes | Semantic version (e.g. `1.0.0`, `2.1.3-beta.1`) | +| `description` | Yes | Human-readable package description (must not be empty) | +| `authors` | No | List of author names | +| `license` | No | SPDX license identifier (e.g. `MIT`, `Apache-2.0`) | +| `mthds_version` | No | Required MTHDS runtime version constraint | + +## Dependencies + +Dependencies are declared in the `[dependencies]` section using an alias-as-key format. 
+ +### Declaring Dependencies + +Each dependency entry maps a **snake_case alias** to a package address and version constraint: + +```toml +[dependencies] +scoring_lib = { address = "github.com/acme/scoring-lib", version = "^2.0.0" } +nlp_utils = { address = "github.com/acme/nlp-utils", version = ">=1.0.0, <3.0.0" } +``` + +- The **alias** (left-hand key) must be `snake_case`. It is used when making cross-package references with the `->` syntax (e.g. `scoring_lib->scoring.compute_weighted_score`). +- The **address** follows the same hostname/path pattern as the package address. +- Each dependency alias must be unique within the manifest. + +### Version Constraints + +The **version** field accepts standard version constraint syntax: + +| Syntax | Meaning | Example | +|--------|---------|---------| +| `1.0.0` | Exact version | `1.0.0` | +| `^1.0.0` | Compatible release (same major) | `^2.0.0` | +| `~1.0.0` | Approximately compatible (same major.minor) | `~1.2.0` | +| `>=`, `<=`, `>`, `<` | Comparison operators | `>=1.0.0` | +| `==`, `!=` | Equality / inequality | `!=1.3.0` | +| Comma-separated | Compound constraints | `>=1.0.0, <2.0.0` | +| `*`, `1.*`, `1.0.*` | Wildcards | `2.*` | + +### Local Path Dependencies + +For development or when you maintain related packages side by side, declare a dependency with a `path` field pointing to a local directory: + +```toml +[dependencies] +scoring_lib = { address = "github.com/acme/scoring-lib", version = "2.0.0", path = "../scoring-lib" } +``` + +When a `path` is present: + +- The local directory is used directly β€” no VCS fetch occurs. +- The dependency is **excluded from the lock file** (`methods.lock`). +- Cross-package references work identically to remote dependencies. + +!!! tip "Development Workflow" + Local path dependencies are ideal during active development of multiple packages. Point to a sibling checkout, iterate on both packages together, and remove the `path` field when you are ready to publish. + +### Remote Dependencies + +Dependencies without a `path` field are resolved via Git. Pipelex maps the package address to a clone URL (e.g. `github.com/acme/scoring-lib` becomes `https://github.com/acme/scoring-lib.git`), lists the remote version tags, selects the best match for the version constraint, and caches the result locally. + +See [Dependency Workflow](#dependency-workflow) below for the full lock β†’ install β†’ update lifecycle. + +## Cross-Package References + +Once a dependency is declared in `METHODS.toml`, you can reference its exported pipes and concepts from your `.mthds` bundles using the **`->`** syntax. + +### The `->` Syntax + +A cross-package reference has the form: + +``` +alias->domain.code +``` + +| Part | Description | +|------|-------------| +| `alias` | The dependency alias declared in `[dependencies]` | +| `->` | Cross-package reference operator | +| `domain` | The dot-separated domain path inside the dependency package | +| `code` | The pipe code (`snake_case`) or concept code (`PascalCase`) | + +### Referencing Pipes Across Packages + +To call a pipe from a dependency inside a `PipeSequence` step, use the `->` syntax in the `pipe` field. + +**Dependency package** (`scoring-lib`): + +```toml title="METHODS.toml" +[package] +address = "github.com/acme/scoring-lib" +version = "2.0.0" +description = "Scoring utilities for weighted analysis." 
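+# The [exports] section below controls which pipes other packages may reference;
+# anything not listed here (apart from a bundle's auto-exported main_pipe) stays private.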
+ +[exports.scoring] +pipes = ["compute_weighted_score"] +``` + +```toml title="scoring.mthds" +domain = "scoring" + +[concept.WeightedScore] +description = "A weighted score result" + +[pipe.compute_weighted_score] +type = "PipeLLM" +description = "Compute a weighted score" +output = "WeightedScore" +prompt = "Compute a weighted score for: {{ item }}" +``` + +**Consumer package**: + +```toml title="METHODS.toml" +[package] +address = "github.com/acme/analysis-app" +version = "1.0.0" +description = "Analysis application using the scoring library." + +[dependencies] +scoring_lib = { address = "github.com/acme/scoring-lib", version = "^2.0.0" } + +[exports.analysis] +pipes = ["analyze_item"] +``` + +```toml title="analysis.mthds" +domain = "analysis" +main_pipe = "analyze_item" + +[pipe.analyze_item] +type = "PipeSequence" +description = "Analyze an item using the scoring dependency" +output = "AnalysisResult" +steps = [ + { pipe = "scoring_lib->scoring.compute_weighted_score" }, + { pipe = "summarize" }, +] +``` + +The first step calls `compute_weighted_score` from the `scoring` domain of the `scoring_lib` dependency. The second step calls a local pipe. + +!!! important + The referenced pipe must be listed in the dependency's `[exports]` section (or be a bundle's `main_pipe`, which is auto-exported). Referencing a non-exported pipe raises a visibility error at load time. + +### Referencing Concepts Across Packages + +Concepts from a dependency can be used in pipe inputs and outputs using the same `->` syntax: + +```toml +[pipe.display_score] +type = "PipeLLM" +description = "Format a score for display" +inputs = { score = "scoring_lib->scoring.WeightedScore" } +output = "Text" +prompt = "Format this score for display: {{ score }}" +``` + +### Cross-Package Concept Refinement + +You can refine a concept from a dependency β€” creating a more specialized version that inherits its structure: + +```toml +[concept.DetailedScore] +description = "An extended score with additional detail" +refines = "scoring_lib->scoring.WeightedScore" +``` + +The refined concept inherits the structure of `WeightedScore` from the `scoring_lib` dependency's `scoring` domain. The base concept must be exported by the dependency. + +For a complete guide on concept refinement, see [Refining Concepts](./concepts/refining-concepts.md#cross-package-refinement). + +## Dependency Workflow + +Managing dependencies follows a **lock β†’ install β†’ update** lifecycle, similar to other package managers. + +### Lock File (`methods.lock`) + +Running `pipelex pkg lock` generates a `methods.lock` file next to your `METHODS.toml`. The lock file records the exact resolved version, an integrity hash, and the source URL for every remote dependency: + +```toml +["github.com/acme/scoring-lib"] +version = "2.0.0" +hash = "sha256:a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2" +source = "https://github.com/acme/scoring-lib" + +["github.com/acme/nlp-utils"] +version = "1.3.0" +hash = "sha256:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" +source = "https://github.com/acme/nlp-utils" +``` + +| Field | Description | +|-------|-------------| +| Table key | The package address | +| `version` | Exact resolved version (semantic version) | +| `hash` | SHA-256 integrity hash of all files in the package (excluding `.git/`) | +| `source` | HTTPS URL to the package source | + +!!! note "Commit to Version Control" + You should commit `methods.lock` to your repository. 
This ensures that every collaborator and CI run installs the exact same dependency versions. + +Local path dependencies are **not** recorded in the lock file β€” they are always resolved from the filesystem directly. + +### Resolving and Locking (`pkg lock`) + +```bash +pipelex pkg lock +``` + +This command: + +1. Reads your `METHODS.toml` dependencies +2. Resolves each remote dependency via Git (listing tags, selecting the best version match) +3. Resolves transitive dependencies (dependencies of your dependencies) +4. Computes SHA-256 integrity hashes +5. Writes the `methods.lock` file + +See the [Pkg Lock CLI reference](../9-tools/cli/pkg.md#pkg-lock) for details. + +### Installing Dependencies (`pkg install`) + +```bash +pipelex pkg install +``` + +This command: + +1. Reads the `methods.lock` file +2. Fetches any packages not already present in the local cache +3. Verifies SHA-256 integrity of all cached packages against the lock file + +If a hash mismatch is detected, the command fails with an integrity error. + +See the [Pkg Install CLI reference](../9-tools/cli/pkg.md#pkg-install) for details. + +### Updating Dependencies (`pkg update`) + +```bash +pipelex pkg update +``` + +This command performs a **fresh resolve** β€” it ignores the existing lock file, re-resolves all dependencies from scratch, and rewrites `methods.lock`. It displays a diff showing added, removed, and updated packages. + +!!! tip + Use `pkg update` after changing version constraints in `METHODS.toml`. For day-to-day reproducible builds, use `pkg install` instead. + +See the [Pkg Update CLI reference](../9-tools/cli/pkg.md#pkg-update) for details. + +### Transitive Dependencies + +Pipelex resolves transitive dependencies automatically. If your dependency `A` depends on package `B`, then `B` is resolved and locked as well. + +**Minimum Version Selection (MVS):** When multiple dependency paths request different versions of the same package (a "diamond dependency"), Pipelex selects the minimum version that satisfies all constraints simultaneously. This provides deterministic, reproducible builds. + +**Cycle detection:** Circular dependencies (A depends on B, B depends on A) are detected during resolution and raise an error immediately. + +**Local path dependencies are not recursed:** If a dependency has a `path` field, its own sub-dependencies are not resolved transitively. Only remote dependencies participate in transitive resolution. + +### Package Cache + +Fetched remote packages are stored in a local cache at: + +``` +~/.mthds/packages/{address}/{version}/ +``` + +For example: + +``` +~/.mthds/packages/github.com/acme/scoring-lib/2.0.0/ +``` + +- The `.git/` directory is stripped from cached copies to save space. +- Writes use a staging directory with atomic rename for safety. +- The cache is shared across all your projects β€” a package fetched for one project is available to all others. + +## Exports and Visibility + +The `[exports]` section controls which pipes are visible to other domains. This is the core access-control mechanism of the package system. + +### Default Behavior + +- **Without `METHODS.toml`**: all pipes are public. Any domain can reference any pipe. +- **With `METHODS.toml`**: pipes are **private by default**. Only pipes listed in `[exports]` (and `main_pipe` entries) are accessible from other domains. + +### Declaring Exports + +Exports are organized by domain path. 
Each entry lists the pipes that domain exposes: + +```toml +[exports.legal.contracts] +pipes = ["extract_clause", "analyze_contract"] + +[exports.scoring] +pipes = ["compute_weighted_score"] +``` + +In this example, the `legal.contracts` domain exports two pipes, and the `scoring` domain exports one. + +### Visibility Rules + +| Reference Type | Visibility Check | +|----------------|-----------------| +| Bare reference (no domain prefix) | Always allowed | +| Same-domain reference | Always allowed | +| Cross-domain to exported pipe | Allowed | +| Cross-domain to `main_pipe` | Allowed (auto-exported) | +| Cross-domain to non-exported pipe | **Blocked** | + +!!! important + A bundle's `main_pipe` is **automatically exported** β€” it is always accessible from other domains, even if it is not listed in the `[exports]` section. + +!!! note "Actionable Error Messages" + Visibility violations are detected at load time. When a pipe reference is blocked, the error message tells you exactly which pipe is inaccessible and suggests adding it to the appropriate `[exports]` section in `METHODS.toml`. + +### Example + +Given two bundles: + +```toml +# contracts.mthds +domain = "legal.contracts" +main_pipe = "review_contract" + +[pipe.extract_clause] +# ... + +[pipe.analyze_contract] +# ... + +[pipe.internal_helper] +# ... +``` + +```toml +# scoring.mthds +domain = "scoring" + +[pipe.compute_weighted_score] +# ... +``` + +And this manifest: + +```toml +[exports.legal.contracts] +pipes = ["extract_clause", "analyze_contract"] +``` + +Then from a different domain (e.g. `reporting`): + +- `legal.contracts.extract_clause` β€” allowed (exported) +- `legal.contracts.analyze_contract` β€” allowed (exported) +- `legal.contracts.review_contract` β€” allowed (auto-exported as `main_pipe`) +- `legal.contracts.internal_helper` β€” **blocked** (not exported) + +## Package Directory Structure + +A typical package layout: + +``` +your-project/ +β”œβ”€β”€ METHODS.toml # Package manifest +β”œβ”€β”€ methods.lock # Lock file (commit to VCS) +β”œβ”€β”€ my_project/ +β”‚ β”œβ”€β”€ finance/ +β”‚ β”‚ β”œβ”€β”€ services.py +β”‚ β”‚ β”œβ”€β”€ invoices.mthds +β”‚ β”‚ └── invoices_struct.py +β”‚ └── legal/ +β”‚ β”œβ”€β”€ contracts.mthds +β”‚ β”œβ”€β”€ contracts_struct.py +β”‚ └── services.py +β”œβ”€β”€ .pipelex/ +β”‚ └── pipelex.toml +└── requirements.txt +``` + +The `METHODS.toml` sits at the project root. Pipelex discovers it by walking up from any `.mthds` file until it finds the manifest (stopping at a `.git` boundary or filesystem root). + +## Quick Start + +**1. Scaffold a manifest** from your existing bundles: + +```bash +pipelex pkg init +``` + +This scans all `.mthds` files in the current directory, discovers domains and pipes, and generates a skeleton `METHODS.toml` with placeholder values. Edit the generated file to set the correct address and tune your exports. + +**2. Add a dependency:** + +```bash +pipelex pkg add github.com/acme/scoring-lib --version "^2.0.0" +``` + +This appends a dependency entry to your `METHODS.toml`. The alias is auto-derived from the address (`scoring_lib`), or you can specify one with `--alias`. + +**3. Lock your dependencies:** + +```bash +pipelex pkg lock +``` + +This resolves all remote dependencies (including transitive ones), computes integrity hashes, and writes `methods.lock`. + +**4. Install dependencies:** + +```bash +pipelex pkg install +``` + +This fetches any packages not already cached and verifies their integrity. + +**5. 
Inspect the current manifest:** + +```bash +pipelex pkg list +``` + +This displays the package metadata, dependencies, and exports in formatted tables. + +See the [Pkg CLI reference](../9-tools/cli/pkg.md) for full command details. + +## Related Documentation + +- [Domain](./domain.md) β€” How domains organize concepts and pipes +- [Libraries](./libraries.md) β€” How libraries load and validate bundles +- [Pipelex Bundle Specification](./pipelex-bundle-specification.md) β€” The `.mthds` file format +- [Refining Concepts](./concepts/refining-concepts.md) β€” How to specialize concepts, including cross-package refinement +- [Pkg CLI](../9-tools/cli/pkg.md) β€” CLI commands for package management diff --git a/docs/home/6-build-reliable-ai-workflows/pipe-builder.md b/docs/home/6-build-reliable-ai-workflows/pipe-builder.md index 364cbce44..543dfb072 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipe-builder.md +++ b/docs/home/6-build-reliable-ai-workflows/pipe-builder.md @@ -3,7 +3,7 @@ Pipelex provides powerful tools to automatically generate complete, working pipelines from natural language requirements. This feature leverages AI to translate your ideas into fully functional pipeline code, dramatically speeding up development. !!! tip "Pipe Builder Requirements" - For now, the pipe builder requires access to **Claude 4.5 Sonnet**, either through Pipelex Inference, or using your own key through Anthropic, Amazon Bedrock or BlackboxAI. Don't hesitate to join our [Discord](https://go.pipelex.com/discord) to get a key or see [Configure AI Providers](../../home/5-setup/configure-ai-providers.md) for details. Otherwise, you can also create the workflows yourself, following our [documentation guide](./kick-off-a-pipelex-workflow-project.md). + For now, the pipe builder requires access to **Claude 4.5 Sonnet**, either through Pipelex Inference, or using your own key through Anthropic, Amazon Bedrock or BlackboxAI. Don't hesitate to join our [Discord](https://go.pipelex.com/discord) to get a key or see [Configure AI Providers](../../home/5-setup/configure-ai-providers.md) for details. Otherwise, you can also create the methods yourself, following our [documentation guide](./kick-off-a-methods-project.md). ## Overview @@ -23,7 +23,7 @@ This command runs a validation/fix loop to ensure the generated pipeline is corr By default, the build command creates a numbered directory with three files: -1. **`bundle.plx`** - Your complete pipeline definition with domain, concepts, and pipes +1. **`bundle.mthds`** - Your complete pipeline definition with domain, concepts, and pipes 2. **`inputs.json`** - A pre-filled template showing the inputs your pipeline expects 3. 
**`run_{pipe_code}.py`** - A ready-to-run Python script you can customize and execute @@ -39,7 +39,7 @@ pipelex build pipe "Take a photo as input, and render the opposite of the photo" pipelex build pipe "Take a photo as input, and render the opposite of the photo" \ -o photo_inverter -# Single file only: creates results/photo_inverter_01.plx +# Single file only: creates results/photo_inverter_01.mthds pipelex build pipe "Take a photo as input, and render the opposite of the photo" \ -o photo_inverter --no-extras @@ -52,7 +52,7 @@ pipelex build pipe "Take a photo as input, and render the opposite of the photo" - `-o, --output-name`: Base name for the generated file or directory (without extension) - `--output-dir`: Directory where files will be generated (default: `results`) -- `--no-extras`: Skip generating `inputs.json` and runner, only generate the `.plx` bundle +- `--no-extras`: Skip generating `inputs.json` and runner, only generate the `.mthds` bundle - `--no-output`: Build the pipeline but don't save any files ## Quick Start Example @@ -97,7 +97,7 @@ When you run a build command, Pipelex automatically creates: - **Domain definition**: The namespace for your pipeline - **Concepts**: Structured data types for inputs and outputs - **Pipes**: The processing steps and LLM operations -- **Python structures**: When structured output is needed (saved alongside the `.plx` file with `_struct.py` suffix) +- **Python structures**: When structured output is needed (saved alongside the `.mthds` file with `_struct.py` suffix) All generated pipelines follow Pipelex best practices and conventions automatically. @@ -105,10 +105,10 @@ All generated pipelines follow Pipelex best practices and conventions automatica After generating your pipeline: -1. **Review the generated `.plx` file** to understand the structure +1. **Review the generated `.mthds` file** to understand the structure 2. **Test the pipeline** using the generated example code 3. **Iterate if needed** by modifying the natural language description and regenerating -4. **Customize** the pipeline by editing the `.plx` file directly for fine-tuning +4. **Customize** the pipeline by editing the `.mthds` file directly for fine-tuning ## How It Works @@ -169,7 +169,7 @@ For each pipe signature, generates the complete specification: Finally, the builder: - Names the domain based on your brief - Assembles all concepts and pipes into a complete bundle -- Generates the `.plx` file with proper syntax +- Generates the `.mthds` file with proper syntax - Creates Python structure files (`*_struct.py`) when needed - Validates the pipeline and fixes deterministic issues @@ -177,9 +177,9 @@ Finally, the builder: Want to see how the Pipe Builder works internally? 
Check out the source code: -- **Main pipeline**: [`pipelex/builder/builder.plx`](https://github.com/pipelex/pipelex/tree/main/pipelex/builder/builder.plx) -- **Pipe design**: [`pipelex/builder/pipe/pipe_design.plx`](https://github.com/pipelex/pipelex/tree/main/pipelex/builder/pipe/pipe_design.plx) -- **Concept building**: [`pipelex/builder/concept/concept.plx`](https://github.com/pipelex/pipelex/tree/main/pipelex/builder/concept/concept.plx) +- **Main pipeline**: [`pipelex/builder/builder.mthds`](https://github.com/pipelex/pipelex/tree/main/pipelex/builder/builder.mthds) +- **Pipe design**: [`pipelex/builder/pipe/pipe_design.mthds`](https://github.com/pipelex/pipelex/tree/main/pipelex/builder/pipe/pipe_design.mthds) +- **Concept building**: [`pipelex/builder/concept/concept.mthds`](https://github.com/pipelex/pipelex/tree/main/pipelex/builder/concept/concept.mthds) The Pipe Builder is a great example of a complex, multi-stage Pipelex pipeline in action. diff --git a/docs/home/6-build-reliable-ai-workflows/pipelex-bundle-specification.md b/docs/home/6-build-reliable-ai-workflows/pipelex-bundle-specification.md index 695631852..f36b33d15 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipelex-bundle-specification.md +++ b/docs/home/6-build-reliable-ai-workflows/pipelex-bundle-specification.md @@ -1,10 +1,10 @@ # Pipelex Bundle Specification -A **Pipelex bundle** is the fundamental unit of organization in Pipelex. It's a single `.plx` file that defines a cohesive set of concepts and pipes for a specific domain of work. +A **Pipelex bundle** is the fundamental unit of organization in Pipelex. It's a single `.mthds` file that defines a cohesive set of concepts and pipes for a specific domain of work. ## What is a Pipelex Bundle? -A Pipelex bundle (`.plx` file) brings together: +A Pipelex bundle (`.mthds` file) brings together: - **Domain declaration** - The semantic namespace for all concepts and pipes in this bundle - **Concepts** - The knowledge structures that flow through your pipes (optional) @@ -12,9 +12,9 @@ A Pipelex bundle (`.plx` file) brings together: Think of a bundle as a self-contained module that solves a specific problem domain. For example, you might have: -- `invoice_processing.plx` - Bundle for invoice extraction and validation -- `marketing.plx` - Bundle for generating marketing content -- `document_analysis.plx` - Bundle for analyzing documents +- `invoice_processing.mthds` - Bundle for invoice extraction and validation +- `marketing.mthds` - Bundle for generating marketing content +- `document_analysis.mthds` - Bundle for analyzing documents ## Bundle Structure @@ -50,7 +50,7 @@ Every bundle **must** declare a domain. Only the `domain` field is mandatory; al ```toml domain = "invoice_processing" description = "Tools for extracting and validating invoice data" -source = "path/to/invoice_processing.plx" +source = "path/to/invoice_processing.mthds" system_prompt = "You are an expert in financial document processing." main_pipe = "extract_and_validate_invoice" ``` @@ -242,5 +242,5 @@ prompt = "..." 
- [Understanding Domains](./domain.md) - Deep dive into domain organization - [Designing Pipelines](./pipes/index.md) - Learn how to design and compose pipes - [Define Your Concepts](./concepts/define_your_concepts.md) - Complete guide to concept definitions -- [Kick off a Pipelex Workflow Project](./kick-off-a-pipelex-workflow-project.md) - Start a new project +- [Kick off a Pipelex Method Project](./kick-off-a-methods-project.md) - Start a new project diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/executing-pipelines.md b/docs/home/6-build-reliable-ai-workflows/pipes/executing-pipelines.md index 5bdeda873..b0d55a665 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/executing-pipelines.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/executing-pipelines.md @@ -1,38 +1,40 @@ # Executing Pipelines -Once your pipes are defined in `.plx` files, you can execute them in multiple ways. +Once your pipes are defined in `.mthds` files, you can execute them in multiple ways. ## The Simplest Approach: Run a Bundle File -The easiest way to execute a pipeline is to point directly to your `.plx` bundle file. No library configuration needed. +The easiest way to execute a pipeline is to point directly to your `.mthds` bundle file. No library configuration needed. ### Using the CLI ```bash # Run the bundle's main_pipe -pipelex run path/to/my_bundle.plx +pipelex run path/to/my_bundle.mthds # Run a specific pipe from the bundle -pipelex run path/to/my_bundle.plx --pipe my_specific_pipe +pipelex run path/to/my_bundle.mthds --pipe my_specific_pipe # Run with inputs -pipelex run path/to/my_bundle.plx --inputs inputs.json +pipelex run path/to/my_bundle.mthds --inputs inputs.json ``` !!! tip "Preparing Inputs" - You can generate an input template with `pipelex build inputs path/to/my_bundle.plx`, which creates a `results/inputs.json` file with the required input structure. + You can generate an input template with `pipelex build inputs path/to/my_bundle.mthds`, which creates a `results/inputs.json` file with the required input structure. ### Using Python ```python from pipelex.pipelex import Pipelex -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner Pipelex.make() # Run the bundle's main_pipe -pipe_output = await execute_pipeline( +runner = PipelexRunner( bundle_uri="path/to/my_bundle.mthds", +) +response = await runner.execute_pipeline( inputs={ "my_input": { "concept": "Text", @@ -40,17 +42,21 @@ pipe_output = await execute_pipeline( }, }, ) +pipe_output = response.pipe_output # Or run a specific pipe from the bundle -pipe_output = await execute_pipeline( +runner = PipelexRunner( bundle_uri="path/to/my_bundle.mthds", +) +response = await runner.execute_pipeline( pipe_code="my_specific_pipe", inputs={...}, ) +pipe_output = response.pipe_output ``` !!! info "How `main_pipe` Works" - When you run a bundle without specifying a `pipe_code`, Pipelex executes the bundle's `main_pipe` (declared at the top of the `.plx` file). If no `main_pipe` is defined and no `pipe_code` is provided, an error is raised. + When you run a bundle without specifying a `pipe_code`, Pipelex executes the bundle's `main_pipe` (declared at the top of the `.mthds` file). If no `main_pipe` is defined and no `pipe_code` is provided, an error is raised. If you provide both `bundle_uri` and `pipe_code`, the explicit `pipe_code` takes priority over `main_pipe`.
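+For a standalone script, you can wrap these calls with `asyncio.run`. A minimal sketch under stated assumptions: it reuses the `PipelexRunner` API shown above, the bundle path is illustrative, and the `content` key in the input payload is assumed to carry the input value.
+
+```python
+import asyncio
+
+from pipelex.pipelex import Pipelex
+from pipelex.pipeline.runner import PipelexRunner
+
+
+async def main() -> None:
+    # Point the runner at the bundle; its main_pipe will be executed
+    runner = PipelexRunner(bundle_uri="path/to/my_bundle.mthds")
+    response = await runner.execute_pipeline(
+        inputs={
+            "my_input": {
+                "concept": "Text",
+                "content": "Hello, Pipelex!",  # assumed payload key for the input value
+            },
+        },
+    )
+    print(response.pipe_output)
+
+
+if __name__ == "__main__":
+    Pipelex.make()  # load pipeline definitions before running
+    asyncio.run(main())
+```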
@@ -72,13 +78,13 @@ When executing pipelines programmatically, Pipelex can load pipe **libraries** - #### Library Parameters -When using `execute_pipeline` or `start_pipeline`, you can control library behavior with these parameters: +When using `PipelexRunner`, you can control library behavior with these constructor parameters: - **`library_id`**: A unique identifier for the library instance. If not specified, it defaults to the `pipeline_run_id` (a unique ID generated for each pipeline execution). -- **`library_dirs`**: A list of directory paths to load pipe definitions from. **These directories must contain both your `.plx` files AND any Python files defining `StructuredContent` classes** (e.g., `*_struct.py` files). If not specified, Pipelex falls back to the `PIPELEXPATH` environment variable, then to the current working directory. +- **`library_dirs`**: A list of directory paths to load pipe definitions from. **These directories must contain both your `.mthds` files AND any Python files defining `StructuredContent` classes** (e.g., `*_struct.py` files). If not specified, Pipelex falls back to the `PIPELEXPATH` environment variable, then to the current working directory. -- **`plx_content`**: When provided, Pipelex will load only this PLX content into the library, bypassing directory scanning. This is useful for dynamic pipeline execution without file-based definitions. +- **`mthds_content`**: When provided to `PipelexRunner.execute_pipeline()`, Pipelex will load only this content into the library, bypassing directory scanning. This is useful for dynamic pipeline execution without file-based definitions. !!! info "Python Structure Classes" If your concepts use Python `StructuredContent` classes instead of inline structures, those Python files must be in the directories specified by `library_dirs`. Pipelex auto-discovers and registers these classes during library loading. Learn more about [Python StructuredContent Classes](../concepts/python-classes.md). 
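+For illustration, here is a minimal sketch of such a structure class; the import path and all field names are assumptions, not the library's confirmed API:
+
+```python
+# contracts_struct.py - colocated with contracts.mthds so library loading discovers it
+from pipelex.core.stuffs.stuff_content import StructuredContent  # assumed import path
+
+
+class ContractSummary(StructuredContent):
+    """Structured output for a contract review pipe (hypothetical concept)."""
+
+    title: str
+    parties: list[str]
+    effective_date: str
+```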
@@ -97,13 +103,14 @@ This approach loads pipe definitions from directories and executes a specific pi ```python from pipelex.pipelex import Pipelex -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner # First, initialize Pipelex (this loads all pipeline definitions) Pipelex.make() # Execute the pipeline and wait for the result -pipe_output = await execute_pipeline( +runner = PipelexRunner() +response = await runner.execute_pipeline( pipe_code="description_to_tagline", inputs={ "description": { @@ -112,15 +119,18 @@ pipe_output = await execute_pipeline( }, }, ) +pipe_output = response.pipe_output ``` **Loading from specific directories**: ```python # Load pipes from specific directories -pipe_output = await execute_pipeline( - pipe_code="description_to_tagline", +runner = PipelexRunner( library_dirs=["./pipelines", "./shared_pipes"], +) +response = await runner.execute_pipeline( + pipe_code="description_to_tagline", inputs={ "description": { "concept": "ProductDescription", @@ -128,16 +138,19 @@ pipe_output = await execute_pipeline( }, }, ) +pipe_output = response.pipe_output ``` **Using a custom library ID**: ```python # Use a custom library ID for managing multiple library instances -pipe_output = await execute_pipeline( - pipe_code="description_to_tagline", +runner = PipelexRunner( library_id="my_marketing_library", library_dirs=["./marketing_pipes"], +) +response = await runner.execute_pipeline( + pipe_code="description_to_tagline", inputs={ "description": { "concept": "ProductDescription", @@ -145,18 +158,19 @@ pipe_output = await execute_pipeline( }, }, ) +pipe_output = response.pipe_output ``` !!! tip "Listing available pipes" Use the `pipelex show pipes` command to list all the pipes available in your project. -### Using PLX Content Directly +### Using MTHDS Content Directly -You can directly pass PLX content as a string to `execute_pipeline`, useful for dynamic pipeline execution without file-based definitions. +You can directly pass MTHDS content as a string to `PipelexRunner.execute_pipeline()`, useful for dynamic pipeline execution without file-based definitions. ```python from pipelex.pipelex import Pipelex -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner my_pipe_content = """ domain = "marketing" @@ -181,8 +195,9 @@ Generate a catchy tagline based on the above description. The tagline should be Pipelex.make() -pipe_output = await execute_pipeline( - plx_content=my_pipe_content, +runner = PipelexRunner() +response = await runner.execute_pipeline( + mthds_content=my_pipe_content, inputs={ "description": { "concept": "ProductDescription", @@ -190,10 +205,11 @@ pipe_output = await execute_pipeline( }, }, ) +pipe_output = response.pipe_output ``` !!!
note "Pipe Code Resolution" - When using `plx_content`: + When using `mthds_content`: - If the content has a `main_pipe` property and you don't provide `pipe_code`, the `main_pipe` is executed - If you provide `pipe_code`, it overrides `main_pipe` @@ -219,7 +235,7 @@ Pipelex.make() # Start the pipeline without waiting pipeline_run_id, task = await start_pipeline( - bundle_uri="path/to/my_bundle.plx", + bundle_uri="path/to/my_bundle.mthds", inputs={ "description": { "concept": "ProductDescription", diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/index.md b/docs/home/6-build-reliable-ai-workflows/pipes/index.md index 08e6d0a4e..ad466e927 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/index.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/index.md @@ -1,19 +1,19 @@ # Designing Pipelines -In Pipelex, a pipeline is not just a rigid sequence of steps; it's a dynamic and intelligent workflow built by composing individual, reusable components called **pipes**. This approach allows you to break down complex AI tasks into manageable, testable, and reliable units. +In Pipelex, a pipeline is not just a rigid sequence of steps; it's a dynamic and intelligent method built by composing individual, reusable components called **pipes**. This approach allows you to break down complex AI tasks into manageable, testable, and reliable units. This guide provides an overview of how to design your pipelines. ## The Building Blocks: Pipes -A pipeline is composed of pipes. There are two fundamental types of pipes you will use to build your workflows: +A pipeline is composed of pipes. There are two fundamental types of pipes you will use to build your methods: * **[Pipe Operators](./pipe-operators/index.md)**: These are the "workers" of your pipeline. They perform concrete actions like calling an LLM (`PipeLLM`), extracting text from a document (`PipeExtract`), or running a Python function (`PipeFunc`). Each operator is a specialized tool designed for a specific task. -* **[Pipe Controllers](./pipe-controllers/index.md)**: These are the "managers" of your pipeline. They don't perform tasks themselves but orchestrate the execution flow of other pipes. They define the logic of your workflow, such as running pipes in sequence (`PipeSequence`), in parallel (`PipeParallel`), or based on a condition (`PipeCondition`). +* **[Pipe Controllers](./pipe-controllers/index.md)**: These are the "managers" of your pipeline. They don't perform tasks themselves but orchestrate the execution flow of other pipes. They define the logic of your method, such as running pipes in sequence (`PipeSequence`), in parallel (`PipeParallel`), or based on a condition (`PipeCondition`). -## Designing a Pipeline: Composition in PLX +## Designing a Pipeline: Composition in MTHDS -The most common way to design a pipeline is by defining and composing pipes in a `.plx` configuration file. This provides a clear, declarative way to see the structure of your workflow. +The most common way to design a pipeline is by defining and composing pipes in a `.mthds` configuration file. This provides a clear, declarative way to see the structure of your method. Each pipe, whether it's an operator or a controller, is defined in its own `[pipe.<pipe_name>]` table. The `<pipe_name>` becomes the unique identifier for that pipe. @@ -36,13 +36,13 @@ Each pipe, whether it's an operator or a controller, is defined in its own `[pip ❌ [pipe.GENERATE_TAGLINE] # All caps not allowed ``` -Let's look at a simple example. Imagine we want a workflow that: +Let's look at a simple example.
Imagine we want a workflow that: +Let's look at a simple example. Imagine we want a method that: 1. Takes a product description. 2. Generates a short, catchy marketing tagline for it. We can achieve this with a `PipeLLM` operator. -`marketing_pipeline.plx` +`marketing_pipeline.mthds` ```toml domain = "marketing" description = "Marketing content generation domain" @@ -75,7 +75,7 @@ The output concept is very important. Indeed, the output of your pipe will be co ### Understanding the Pipe Contract -Every pipe defines a **contract** through its `inputs` and `output` fields. This contract is fundamental to how Pipelex ensures reliability in your workflows: +Every pipe defines a **contract** through its `inputs` and `output` fields. This contract is fundamental to how Pipelex ensures reliability in your methods: * **`inputs`**: This dictionary defines the **mandatory and necessary** data that must be present in the [Working Memory](working-memory.md) before the pipe can execute. Each key in the dictionary becomes a variable name that you can reference in your pipe's logic (e.g., in prompts), and each value specifies the concept type that the data must conform to. If any required input is missing or doesn't match the expected concept, the pipeline will fail a clear error message. You can specify multiple inputs by using a list of concepts. For example, `inputs = { description = "ProductDescription", keywords = "Keyword[]" }` will require a `ProductDescription` and a list of `Keyword`s. (See more about [Understanding Multiplicity](./understanding-multiplicity.md) for details.) @@ -83,12 +83,12 @@ You can specify multiple inputs by using a list of concepts. For example, `input * **`output`**: This field declares what the pipe will produce. The output will always be an instance of the specified concept. The structure and type of the output depend on the concept definition (See more about concepts [here](../concepts/native-concepts.md)). * You can specify **multiple outputs** using bracket notation (e.g., `Keyword[]` for a variable list, or `Image[3]` for exactly 3 images) -### Multi-Step Workflows +### Multi-Step Methods -To create a multi-step workflow, you use a controller. The `PipeSequence` controller is the most common one. It executes a series of pipes in a specific order. +To create a multi-step method, you use a controller. The `PipeSequence` controller is the most common one. It executes a series of pipes in a specific order. -`marketing_pipeline.plx` +`marketing_pipeline.mthds` ```toml domain = "marketing" description = "Marketing content generation domain" diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeBatch.md b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeBatch.md index 0d8496d5f..05c979cf9 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeBatch.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeBatch.md @@ -16,9 +16,9 @@ This is the ideal controller for processing collections of documents, images, or ## Configuration -`PipeBatch` is configured in your pipeline's `.plx` file. +`PipeBatch` is configured in your pipeline's `.mthds` file. 
-### PLX Parameters +### MTHDS Parameters | Parameter | Type | Description | Required | | ------------------ | ------------ | ------------------------------------------------------------------------------------------------------------------------------------------------ | -------- | diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeCondition.md b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeCondition.md index beb94532e..d459e9827 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeCondition.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeCondition.md @@ -13,9 +13,9 @@ The `PipeCondition` controller adds branching logic to your pipelines. It evalua ## Configuration -`PipeCondition` is configured in your pipeline's `.plx` file. +`PipeCondition` is configured in your pipeline's `.mthds` file. -### PLX Parameters +### MTHDS Parameters | Parameter | Type | Description | Required | | ------------------------------ | -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------ | diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeParallel.md b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeParallel.md index 9bff1cb53..9c58bd5b3 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeParallel.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeParallel.md @@ -16,9 +16,9 @@ You must use `add_each_output`, `combined_output`, or both. ## Configuration -`PipeParallel` is configured in your pipeline's `.plx` file. +`PipeParallel` is configured in your pipeline's `.mthds` file. -### PLX Parameters +### MTHDS Parameters | Parameter | Type | Description | Required | | ----------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------- | @@ -26,13 +26,13 @@ You must use `add_each_output`, `combined_output`, or both. | `description` | string | A description of the parallel operation. | Yes | | `inputs` | dictionary | The input concept(s) for the parallel operation, as a dictionary mapping input names to concept codes. | Yes | | `output` | string | The output concept produced by the parallel operation. | Yes | -| `parallels` | array of tables| An array defining the pipes to run in parallel. Each table is a sub-pipe definition. | Yes | +| `branches` | array of tables| An array defining the pipes to run in parallel. Each table is a sub-pipe definition. | Yes | | `add_each_output` | boolean | If `true`, adds the output of each parallel pipe to the working memory individually. Defaults to `true`. | No | -| `combined_output` | string | The name of a concept to use for a single, combined output object. The structure of this concept must have fields that match the `result` names from the `parallels` array. | No | +| `combined_output` | string | The name of a concept to use for a single, combined output object. The structure of this concept must have fields that match the `result` names from the `branches` array. 
| No | ### Parallel Step Configuration -Each entry in the `parallels` array is a table with the following keys: +Each entry in the `branches` array is a table with the following keys: | Key | Type | Description | Required | | -------- | ------ | ---------------------------------------------------------------------------------------- | -------- | @@ -67,7 +67,7 @@ inputs = { description = "ProductDescription" } output = "ProductAnalysis" # This name is for the combined output add_each_output = true combined_output = "ProductAnalysis" -parallels = [ +branches = [ { pipe = "extract_features", result = "features" }, { pipe = "analyze_sentiment", result = "sentiment" }, ] diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeSequence.md b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeSequence.md index 0ca243123..8249dd9d8 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeSequence.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeSequence.md @@ -1,6 +1,6 @@ # PipeSequence -The `PipeSequence` controller is used to execute a series of pipes one after another. It is the fundamental building block for creating linear workflows where the output of one step becomes the input for the next. +The `PipeSequence` controller is used to execute a series of pipes one after another. It is the fundamental building block for creating linear methods where the output of one step becomes the input for the next. ## How it works @@ -12,9 +12,9 @@ A `PipeSequence` defines a list of `steps`. Each step calls another pipe and giv ## Configuration -`PipeSequence` is configured in your pipeline's `.plx` file. +`PipeSequence` is configured in your pipeline's `.mthds` file. -### PLX Parameters +### MTHDS Parameters | Parameter | Type | Description | Required | | ---------- | --------------- | -------------------------------------------------------------------------------------------------------------- | -------- | diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/index.md b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/index.md index 48b56f70b..0cb7e229a 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/index.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/index.md @@ -1,13 +1,13 @@ # Pipe Controllers -Pipe controllers are the orchestrators of a Pipelex pipeline. While [Pipe Operators](../pipe-operators/index.md) perform the work, pipe controllers define the workflow and manage the execution logic. They allow you to run other pipes in sequence, in parallel, or conditionally. +Pipe controllers are the orchestrators of a Pipelex pipeline. While [Pipe Operators](../pipe-operators/index.md) perform the work, pipe controllers define the method and manage the execution logic. They allow you to run other pipes in sequence, in parallel, or conditionally. ## Core Controllers Here are the primary pipe controllers available in Pipelex: - [**`PipeSequence`**](./PipeSequence.md): The most fundamental controller. It runs a series of pipes one after another, passing the results from one step to the next. -- [**`PipeParallel`**](./PipeParallel.md): Executes multiple independent pipes at the same time, significantly speeding up workflows where tasks don't depend on each other. +- [**`PipeParallel`**](./PipeParallel.md): Executes multiple independent pipes at the same time, significantly speeding up methods where tasks don't depend on each other. 
- [**`PipeBatch`**](./PipeBatch.md): Performs a "map" operation. It takes a list of items and runs the same pipe on every single item in parallel. - [**`PipeCondition`**](./PipeCondition.md): Adds branching logic (`if/else`) to your pipeline. It evaluates an expression and chooses which pipe to run next based on the result. diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeExtract.md b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeExtract.md index 942d847ed..77e9cd8f2 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeExtract.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeExtract.md @@ -19,7 +19,7 @@ The `PageContent` object has the following structure: ## Configuration -`PipeExtract` is configured in your pipeline's `.plx` file. +`PipeExtract` is configured in your pipeline's `.mthds` file. ### OCR Models and Backend System @@ -37,7 +37,7 @@ Common OCR model handles: OCR presets are defined in your model deck configuration and can include parameters like `max_nb_images` and `image_min_size`. -### PLX Parameters +### MTHDS Parameters | Parameter | Type | Description | Required | | --------------------------- | ------- | ---------------------------------------------------------------------------------------------------------------------------------------- | -------- | diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeFunc.md b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeFunc.md index 355bb4401..2d72f6d74 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeFunc.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeFunc.md @@ -81,13 +81,13 @@ async def concatenate_texts(working_memory: WorkingMemory) -> TextContent: pass ``` -Then use `function_name = "custom_concat"` in your `.plx` file. +Then use `function_name = "custom_concat"` in your `.mthds` file. ## Configuration -Once the function is registered, you can use it in your `.plx` file. +Once the function is registered, you can use it in your `.mthds` file. -### PLX Parameters +### MTHDS Parameters | Parameter | Type | Description | Required | | --------------- | ------ | --------------------------------------------------------------------------- | -------- | @@ -98,7 +98,7 @@ Once the function is registered, you can use it in your `.plx` file. ### Example -This PLX snippet shows how to use the `concatenate_texts` function defined above. It assumes two previous pipes have produced outputs named `text_a` and `text_b`. +This MTHDS snippet shows how to use the `concatenate_texts` function defined above. It assumes two previous pipes have produced outputs named `text_a` and `text_b`. ```toml [pipe.combine_them] diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeImgGen.md b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeImgGen.md index db438bb6c..8c1e54b92 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeImgGen.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeImgGen.md @@ -10,7 +10,7 @@ The pipe can be configured to generate a single image or a list of images. ## Configuration -`PipeImgGen` is configured in your pipeline's `.plx` file. +`PipeImgGen` is configured in your pipeline's `.mthds` file. 
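+For reference, a minimal configuration sketch (the pipe code, description, and prompt text are illustrative; the `prompt` field is required, as explained below):
+
+```toml
+[pipe.generate_hero_image]
+type = "PipeImgGen"
+description = "Generate a hero image for a landing page"
+output = "Image"
+prompt = "A minimalist flat-design illustration of a data pipeline"
+```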
### The `prompt` Field is Required @@ -55,7 +55,7 @@ Common image generation model handles: Image generation presets are defined in your model deck configuration and can include parameters like `quality`, `guidance_scale`, and `safety_tolerance`. -### PLX Parameters +### MTHDS Parameters | Parameter | Type | Description | Required | | ----------------------- | --------------- | ----------------------------------------------------------------------------------------------------------------------------- | -------- | diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeLLM.md b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeLLM.md index c558f9548..987d083b6 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeLLM.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeLLM.md @@ -204,9 +204,9 @@ Analyze the document and explain how it relates to the context: $reference_doc ## Configuration -`PipeLLM` is configured in your pipeline's `.plx` file. +`PipeLLM` is configured in your pipeline's `.mthds` file. -### PLX Parameters +### MTHDS Parameters | Parameter | Type | Description | Required | | --------------------------- | ------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/index.md b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/index.md index 8a131c38a..9b1daeb01 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/index.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/index.md @@ -2,7 +2,7 @@ Pipe operators are the fundamental building blocks in Pipelex, representing a single, focused task. They are the "verbs" of your pipeline that perform the actual work. -Each operator specializes in a specific kind of action, from interacting with Large Language Models to executing custom Python code. You combine these operators using [Pipe Controllers](../pipe-controllers/index.md) to create complex workflows. +Each operator specializes in a specific kind of action, from interacting with Large Language Models to executing custom Python code. You combine these operators using [Pipe Controllers](../pipe-controllers/index.md) to create complex methods. ## Core Operators diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-output.md b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-output.md index 8fc5ad331..a8857dde9 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-output.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-output.md @@ -51,7 +51,7 @@ invoice = pipe_output.main_stuff_as(content_type=Invoice) ### Option 2: Inline Structure -If the output concept was defined with [inline structures](../concepts/inline-structures.md) directly in the `.plx` file, the generated class is not importable. Use the `PipeOutput` accessor methods instead: +If the output concept was defined with [inline structures](../concepts/inline-structures.md) directly in the `.mthds` file, the generated class is not importable. Use the `PipeOutput` accessor methods instead: ```python pipe_output = await execute_pipeline( @@ -176,6 +176,6 @@ This allows you to access intermediate results from multi-step pipelines. 
See [Working Memory](working-memory.md) for details. - [Working Memory](working-memory.md) - Understanding data flow between pipes - [Executing Pipelines](executing-pipelines.md) - How to run pipelines -- [Inline Structures](../concepts/inline-structures.md) - Defining structures in `.plx` files +- [Inline Structures](../concepts/inline-structures.md) - Defining structures in `.mthds` files - [Python StructuredContent Classes](../concepts/python-classes.md) - Defining structures in Python diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/provide-inputs.md b/docs/home/6-build-reliable-ai-workflows/pipes/provide-inputs.md index 9936664fb..8ca6c3b9e 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/provide-inputs.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/provide-inputs.md @@ -7,13 +7,13 @@ When running Pipelex pipelines, you need to provide input data that matches what The Pipelex CLI can generate a template JSON file with all the required inputs for your pipeline: ```bash -pipelex build inputs path/to/my_pipe.plx +pipelex build inputs path/to/my_pipe.mthds ``` This creates a `results/inputs.json` file with the structure needed for your pipeline. You can then fill in the values and use it with: ```bash -pipelex run path/to/my_pipe.plx --inputs results/inputs.json +pipelex run path/to/my_pipe.mthds --inputs results/inputs.json ``` See more about the options of the CLI [here](../../9-tools/cli/index.md). diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/understanding-multiplicity.md b/docs/home/6-build-reliable-ai-workflows/pipes/understanding-multiplicity.md index c813942a2..6e92581fd 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/understanding-multiplicity.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/understanding-multiplicity.md @@ -1,6 +1,6 @@ # Understanding Multiplicity -Multiplicity in Pipelex defines how many items a particular stuff can comprise in a particular context. This applies to any of the pipe input variables and also to the output of the pipe. This idea is fundamental to building flexible AI workflows that can handle both single items and collections. +Multiplicity in Pipelex defines how many items a particular stuff can comprise in a particular context. This applies to any of the pipe input variables and also to the output of the pipe. This idea is fundamental to building flexible AI methods that can handle both single items and collections. This guide explains the philosophy behind multiplicity in Pipelex and how to use it effectively in your pipelines. @@ -23,7 +23,7 @@ Each of these definitions describes a single, coherent entity.
The essence of wh ### Lists Are Circumstantial, Not Essential -The number of items you're working with is a circumstantial detail of your workflow, not part of the concept's identity: +The number of items you're working with is a circumstantial detail of your method, not part of the concept's identity: - A pipe that extracts keywords from text might find 3 keywords or 30—but each is still a `Keyword` - A pipe that generates product ideas might produce 5 ideas or 10—but each remains a `ProductIdea` @@ -355,7 +355,7 @@ Use variable input multiplicity when: - The pipe should handle batches of unknown size - You're aggregating or summarizing multiple items -- The workflow involves collecting items before processing +- The method involves collecting items before processing - You want maximum flexibility in how the pipe is called ### When to Use Fixed Input (Brackets with Number `[N]`) @@ -372,13 +372,15 @@ Use fixed input multiplicity when: When a pipe produces multiple outputs, Pipelex automatically wraps them in a `ListContent` container. This container maintains the type information: ```python -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner # Execute a pipe with multiple outputs -pipe_output = await execute_pipeline( +runner = PipelexRunner() +response = await runner.execute_pipeline( pipe_code="extract_line_items", inputs={"invoice_text": "Your invoice text here..."} ) +pipe_output = response.pipe_output # Get the list of line items line_items_list = pipe_output.main_stuff_as_items(item_type=LineItem) @@ -392,9 +394,11 @@ line_items_container = pipe_output.main_stuff_as_list(item_type=LineItem) Similarly, when providing multiple inputs, you can use lists: ```python +from pipelex.pipeline.runner import PipelexRunner from pipelex.core.stuffs.text_content import TextContent -pipe_output = await execute_pipeline( +runner = PipelexRunner() +response = await runner.execute_pipeline( pipe_code="summarize_all_documents", inputs={ "documents": [ @@ -404,6 +408,7 @@ pipe_output = await execute_pipeline( ] } ) +pipe_output = response.pipe_output ``` ## Summary diff --git a/docs/home/7-configuration/config-technical/inference-backend-config.md b/docs/home/7-configuration/config-technical/inference-backend-config.md index eb5554566..9ac2f24f4 100644 --- a/docs/home/7-configuration/config-technical/inference-backend-config.md +++ b/docs/home/7-configuration/config-technical/inference-backend-config.md @@ -438,7 +438,7 @@ default-large-context-text = "gemini-2.5-flash" default-small = "gemini-2.5-flash-lite" ``` -When using aliases in `.plx` files or other configurations, prefix them with `@`: +When using aliases in `.mthds` files or other configurations, prefix them with `@`: ```toml model = "@best-claude" # References the best-claude alias @@ -468,7 +468,7 @@ vision-cheap = { model = "@default-small-vision", temperature = 0.5 } vision-diagram = { model = "@default-premium-vision", temperature = 0.3 } ``` -When using presets in `.plx` files, prefix them with `$`: +When using presets in `.mthds` files, prefix them with `$`: ```toml model = "$engineering-structured" # Uses preset for structured extraction @@ -486,7 +486,7 @@ Extract presets combine document extraction model selection with optimized param extract-testing = { model = "@default-extract-document", max_nb_images = 5, image_min_size = 50 } ``` -You can also use aliases directly in `.plx` files for document extraction: +You can also use aliases directly in `.mthds` files for document
extraction: ```toml model = "@default-extract-document" # Uses default document extraction alias @@ -505,7 +505,7 @@ gen-image-fast = { model = "@default-small", quality = "low" } gen-image-high-quality = { model = "@default-premium", quality = "high" } ``` -When using image generation presets in `.plx` files, prefix them with `$`: +When using image generation presets in `.mthds` files, prefix them with `$`: ```toml model = "$gen-image" # Uses default image generation preset @@ -558,7 +558,7 @@ small-llm = ["gemini-2.5-flash-lite", "gpt-4o-mini", "claude-3-haiku"] document_extractor = ["azure-document-intelligence", "mistral-document-ai-2505"] ``` -When using waterfalls in `.plx` files, prefix them with `~`: +When using waterfalls in `.mthds` files, prefix them with `~`: ```toml model = "~premium-llm" # Will try claude-4.5-opus, then gemini-3.0-pro, then gpt-5.2 diff --git a/docs/home/7-configuration/config-technical/library-config.md b/docs/home/7-configuration/config-technical/library-config.md index 25ba0a450..c19a34c8f 100644 --- a/docs/home/7-configuration/config-technical/library-config.md +++ b/docs/home/7-configuration/config-technical/library-config.md @@ -1,8 +1,8 @@ # Pipeline Discovery and Loading -When running pipelines, Pipelex needs to find your `.plx` bundle files. There are two approaches: +When running pipelines, Pipelex needs to find your `.mthds` bundle files. There are two approaches: -1. **Point to the bundle file directly** - The simplest option. Just pass the path to your `.plx` file. No configuration needed. +1. **Point to the bundle file directly** - The simplest option. Just pass the path to your `.mthds` file. No configuration needed. 2. **Configure library directories** - For larger projects. Pipelex scans directories to discover all bundles, letting you reference pipes by code. @@ -10,35 +10,43 @@ Most users should start with the first approach. ## The Simplest Way: Use the Bundle Path Directly -If you just want to run a pipe from a single `.plx` file, **you don't need any library configuration**. Simply point to your bundle file: +If you just want to run a pipe from a single `.mthds` file, **you don't need any library configuration**. Simply point to your bundle file: ```bash # CLI: run the bundle's main_pipe -pipelex run path/to/my_bundle.plx +pipelex run path/to/my_bundle.mthds # CLI: run a specific pipe from the bundle -pipelex run path/to/my_bundle.plx --pipe my_pipe +pipelex run path/to/my_bundle.mthds --pipe my_pipe ``` ```python +from pipelex.pipeline.runner import PipelexRunner + # Python: run the bundle's main_pipe -pipe_output = await execute_pipeline( - bundle_uri="path/to/my_bundle.plx", +runner = PipelexRunner( + bundle_uri="path/to/my_bundle.mthds", +) +response = await runner.execute_pipeline( inputs={...}, ) +pipe_output = response.pipe_output # Python: run a specific pipe from the bundle -pipe_output = await execute_pipeline( - bundle_uri="path/to/my_bundle.plx", +runner = PipelexRunner( + bundle_uri="path/to/my_bundle.mthds", +) +response = await runner.execute_pipeline( pipe_code="my_pipe", inputs={...}, ) +pipe_output = response.pipe_output ``` This is the recommended approach for newcomers and simple projects. Pipelex reads the file directly - no discovery needed. !!! tip "When to use library directories" - The library directory configuration below is useful when you have **multiple bundles across different directories** and want to reference pipes by code without specifying the bundle path each time. For most use cases, pointing to the `.plx` file directly is simpler.
+ The library directory configuration below is useful when you have **multiple bundles across different directories** and want to reference pipes by code without specifying the bundle path each time. For most use cases, pointing to the `.mthds` file directly is simpler. --- @@ -46,7 +54,7 @@ This is the recommended approach for newcomers and simple projects. Pipelex read When you initialize Pipelex with `Pipelex.make()`, the system: -1. **Scans your project directory** for all `.plx` files +1. **Scans your project directory** for all `.mthds` files 2. **Discovers Python structure classes** that inherit from `StructuredContent` 3. **Loads pipeline definitions** including domains, concepts, and pipes 4. **Registers custom functions** decorated with `@pipe_func()` @@ -55,7 +63,7 @@ All of this happens automatically - no configuration needed. ## Configuring Library Directories -When executing pipelines, Pipelex needs to know where to find your `.plx` files and Python structure classes. You can configure this using a **3-tier priority system** that gives you flexibility from global defaults to per-execution overrides. +When executing pipelines, Pipelex needs to know where to find your `.mthds` files and Python structure classes. You can configure this using a **3-tier priority system** that gives you flexibility from global defaults to per-execution overrides. ### The 3-Tier Priority System @@ -68,7 +76,7 @@ Pipelex resolves library directories using this priority order (highest to lowes | **3 (Fallback)** | `PIPELEXPATH` environment variable | System-wide or shell session default | !!! info "Empty List is Valid" - Passing an empty list `[]` to `library_dirs` is a valid explicit value that **disables** directory-based library loading. This is useful when using `plx_content` directly without needing files from the filesystem. + Passing an empty list `[]` to `library_dirs` is a valid explicit value that **disables** directory-based library loading. This is useful when using `mthds_content` directly without needing files from the filesystem. ### Using the PIPELEXPATH Environment Variable @@ -119,7 +127,7 @@ pipelex run my_pipe -L /path/to/pipelines pipelex run my_pipe -L /path/to/shared_pipes -L /path/to/project_pipes # Combined with other options -pipelex run my_bundle.plx --inputs data.json -L /path/to/pipelines +pipelex run my_bundle.mthds --inputs data.json -L /path/to/pipelines # Available on multiple commands pipelex validate --all -L /path/to/pipelines/dir @@ -132,31 +140,33 @@ pipelex which my_pipe -L /path/to/pipelines/dir ### Setting Instance Defaults with `Pipelex.make()` -For Python applications, you can set a default library directory when initializing Pipelex. This default will be used for all subsequent `execute_pipeline` calls unless overridden. +For Python applications, you can set a default library directory when initializing Pipelex. This default will be used for all subsequent `PipelexRunner.execute_pipeline()` calls unless overridden. 
```python from pipelex.pipelex import Pipelex -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner # Set instance-level defaults at initialization Pipelex.make( library_dirs=["/path/to/shared_pipes", "/path/to/project_pipes"] ) -# All execute_pipeline calls will use these directories by default -pipe_output = await execute_pipeline( +# All PipelexRunner.execute_pipeline() calls will use these directories by default +runner = PipelexRunner() +response = await runner.execute_pipeline( pipe_code="my_pipe", inputs={"input": "value"}, ) +pipe_output = response.pipe_output ``` -### Per-Call Override with `execute_pipeline` +### Per-Runner Override with `PipelexRunner` -For maximum flexibility, you can override library directories on each `execute_pipeline` call: +For maximum flexibility, you can override library directories on each `PipelexRunner` instance: ```python from pipelex.pipelex import Pipelex -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner # Initialize with default directories Pipelex.make( @@ -164,24 +174,32 @@ ) # Use the default directories -output1 = await execute_pipeline( +runner1 = PipelexRunner() +response1 = await runner1.execute_pipeline( pipe_code="default_pipe", inputs={"input": "value"}, ) +pipe_output1 = response1.pipe_output # Override for a specific execution -output2 = await execute_pipeline( - pipe_code="special_pipe", +runner2 = PipelexRunner( library_dirs=["/path/to/special_pipes"], # Overrides instance default +) +response2 = await runner2.execute_pipeline( + pipe_code="special_pipe", inputs={"input": "value"}, ) +pipe_output2 = response2.pipe_output -# Disable directory loading (use only plx_content) -output3 = await execute_pipeline( - plx_content=my_plx_string, +# Disable directory loading (use only mthds_content) +runner3 = PipelexRunner( library_dirs=[], # Empty list disables directory-based loading +) +response3 = await runner3.execute_pipeline( + mthds_content=my_mthds_string, inputs={"input": "value"}, ) +pipe_output3 = response3.pipe_output ``` ### Priority Resolution Examples @@ -194,11 +212,15 @@ export PIPELEXPATH="/shared/pipes" ``` ```python +from pipelex.pipeline.runner import PipelexRunner + # Python: No library_dirs specified anywhere Pipelex.make() # No library_dirs # Uses PIPELEXPATH: /shared/pipes -output = await execute_pipeline(pipe_code="my_pipe", inputs={...}) +runner = PipelexRunner() +response = await runner.execute_pipeline(pipe_code="my_pipe", inputs={...}) +pipe_output = response.pipe_output ``` **Example 2: Instance default overrides PIPELEXPATH** ```bash export PIPELEXPATH="/shared/pipes" ``` ```python +from pipelex.pipeline.runner import PipelexRunner + # Python: Instance default set Pipelex.make(library_dirs=["/project/pipes"]) # Uses instance default: /project/pipes (PIPELEXPATH ignored) -output = await execute_pipeline(pipe_code="my_pipe", inputs={...}) +runner = PipelexRunner() +response = await runner.execute_pipeline(pipe_code="my_pipe", inputs={...}) +pipe_output = response.pipe_output ``` -**Example 3: Per-call override takes highest priority** +**Example 3: Per-runner override takes highest priority** ```python +from pipelex.pipeline.runner import PipelexRunner + Pipelex.make(library_dirs=["/default/pipes"]) -# Uses per-call value: /special/pipes -output = await execute_pipeline( - pipe_code="my_pipe", +# Uses per-runner value: /special/pipes +runner = PipelexRunner( library_dirs=["/special/pipes"], # Highest priority +) +response = await
runner.execute_pipeline( + pipe_code="my_pipe", inputs={...}, ) +pipe_output = response.pipe_output ``` ### Best Practices @@ -237,9 +268,9 @@ output = await execute_pipeline( -3. **Use per-call `library_dirs` for exceptions**: Override only when a specific execution needs different directories. +3. **Use per-runner `library_dirs` for exceptions**: Override only when a specific runner needs different directories. -4. **Use empty list `[]` for isolated execution**: When you want to execute only from `plx_content` without loading any file-based definitions. +4. **Use empty list `[]` for isolated execution**: When you want to execute only from `mthds_content` without loading any file-based definitions. -5. **Include structure class directories**: Remember that `library_dirs` must contain both `.plx` files AND Python files defining `StructuredContent` classes. +5. **Include structure class directories**: Remember that `library_dirs` must contain both `.mthds` files AND Python files defining `StructuredContent` classes. ## Excluded Directories @@ -255,11 +286,11 @@ To improve performance and avoid loading unnecessary files, Pipelex automaticall - `.env` - Environment files - `results` - Common output directory -Files in these directories will not be scanned, even if they contain `.plx` files or structure classes. +Files in these directories will not be scanned, even if they contain `.mthds` files or structure classes. ## Project Organization -**Golden rule:** Put `.plx` files where they make sense in YOUR project. Pipelex finds them automatically. +**Golden rule:** Put `.mthds` files where they make sense in YOUR project. Pipelex finds them automatically. ### Common Patterns @@ -273,11 +304,11 @@ your_project/ │ ├── finance/ │ │ ├── models.py │ │ ├── services.py -│ │ ├── invoices.plx # With finance code +│ │ ├── invoices.mthds # With finance code │ │ └── invoices_struct.py │ └── legal/ │ ├── models.py -│ ├── contracts.plx # With legal code +│ ├── contracts.mthds # With legal code │ └── contracts_struct.py ├── .pipelex/ └── requirements.txt @@ -297,9 +328,9 @@ Group all pipelines in one place: your_project/ ├── my_project/ │ ├── pipelines/ # All pipelines here -│ │ ├── finance.plx +│ │ ├── finance.mthds │ │ ├── finance_struct.py -│ │ ├── legal.plx +│ │ ├── legal.mthds │ │ └── legal_struct.py │ └── core/ └── .pipelex/ @@ -321,10 +352,10 @@ your_project/ ├── my_project/ │ ├── features/ │ │ ├── document_processing/ -│ │ │ ├── extract.plx +│ │ │ ├── extract.mthds │ │ │ └── extract_struct.py │ │ └── image_generation/ -│ │ ├── generate.plx +│ │ ├── generate.mthds │ │ └── generate_struct.py │ └── main.py └── .pipelex/ @@ -337,11 +368,11 @@ your_project/ ├── my_project/ │ ├── finance/ │ │ ├── pipelines/ -│ │ │ └── invoices.plx +│ │ │ └── invoices.mthds │ │ └── invoice_struct.py │ ├── legal/ │ │ ├── pipelines/ -│ │ │ └── contracts.plx +│ │ │ └── contracts.mthds │ │ └── contract_struct.py │ └── main.py └── .pipelex/ @@ -352,7 +383,7 @@ your_project/ ``` your_project/ ├── my_project/ -│ ├── invoice_processing.plx +│ ├── invoice_processing.mthds │ ├── invoice_struct.py │ └── main.py └── .pipelex/ @@ -364,14 +395,14 @@ Pipelex loads your pipelines in a specific order to ensure dependencies are reso ### 1.
Domain Loading -- Loads domain definitions from all `.plx` files +- Loads domain definitions from all `.mthds` files - Each domain must be defined exactly once - Supports system prompts and structure templates per domain ### 2. Concept Loading - Loads native concepts (Text, Image, PDF, etc.) -- Loads custom concepts from `.plx` files +- Loads custom concepts from `.mthds` files - Validates concept definitions and relationships - Links concepts to Python structure classes by name @@ -383,7 +414,7 @@ Pipelex loads your pipelines in a specific order to ensure dependencies are reso ### 4. Pipe Loading -- Loads pipe definitions from `.plx` files +- Loads pipe definitions from `.mthds` files - Validates pipe configurations - Links pipes with their respective domains - Resolves input/output concept references @@ -441,9 +472,9 @@ pipelex show pipe YOUR_PIPE_CODE ### 1. Organization -- Keep related concepts and pipes in the same `.plx` file +- Keep related concepts and pipes in the same `.mthds` file - Use meaningful domain codes that reflect functionality -- Match Python file names with PLX file names (`finance.plx` → `finance.py`) +- Match Python file names with MTHDS file names (`finance.mthds` → `finance.py`) - Group complex pipelines using subdirectories ### 2. Structure Classes @@ -452,7 +483,7 @@ pipelex show pipe YOUR_PIPE_CODE - Name classes to match concept names exactly - Use `_struct.py` suffix for files containing structure classes (e.g., `finance_struct.py`) - Inherit from `StructuredContent` or its subclasses -- Place structure class files near their corresponding `.plx` files +- Place structure class files near their corresponding `.mthds` files - **Keep modules clean**: Avoid module-level code that executes on import (Pipelex imports modules during auto-discovery) ### 3. Custom Functions @@ -474,11 +505,11 @@ pipelex show pipe YOUR_PIPE_CODE ### Pipelines Not Found -**Problem:** Pipelex doesn't find your `.plx` files. +**Problem:** Pipelex doesn't find your `.mthds` files. **Solutions:** -1. Ensure files have the `.plx` extension +1. Ensure files have the `.mthds` extension 2. Check that files are not in excluded directories 3. Verify file permissions allow reading 4.
Run `pipelex show pipes` to see what was discovered diff --git a/docs/home/9-tools/cli/build/inputs.md b/docs/home/9-tools/cli/build/inputs.md index f476d4b61..c80ccccfc 100644 --- a/docs/home/9-tools/cli/build/inputs.md +++ b/docs/home/9-tools/cli/build/inputs.md @@ -10,7 +10,7 @@ pipelex build inputs [OPTIONS] **Arguments:** -- `TARGET` - Either a pipe code or a bundle file path (`.plx`) - auto-detected +- `TARGET` - Either a pipe code or a bundle file path (`.mthds`) - auto-detected **Options:** @@ -23,13 +23,13 @@ pipelex build inputs [OPTIONS] **Generate inputs from a bundle (uses main_pipe):** ```bash -pipelex build inputs my_bundle.plx +pipelex build inputs my_bundle.mthds ``` **Specify which pipe to use from a bundle:** ```bash -pipelex build inputs my_bundle.plx --pipe my_pipe +pipelex build inputs my_bundle.mthds --pipe my_pipe ``` **Generate inputs for a pipe using a library directory:** @@ -41,7 +41,7 @@ pipelex build inputs my_domain.my_pipe -L ./my_library/ **Custom output path:** ```bash -pipelex build inputs my_bundle.plx --output custom_inputs.json +pipelex build inputs my_bundle.mthds --output custom_inputs.json ``` ## Output Format diff --git a/docs/home/9-tools/cli/build/output.md b/docs/home/9-tools/cli/build/output.md index 36a90a05f..4945db535 100644 --- a/docs/home/9-tools/cli/build/output.md +++ b/docs/home/9-tools/cli/build/output.md @@ -10,7 +10,7 @@ pipelex build output [OPTIONS] **Arguments:** -- `TARGET` - Either a pipe code or a bundle file path (`.plx`) - auto-detected +- `TARGET` - Either a pipe code or a bundle file path (`.mthds`) - auto-detected **Options:** @@ -27,19 +27,19 @@ pipelex build output [OPTIONS] **Generate output from a bundle (uses main_pipe):** ```bash -pipelex build output my_bundle.plx +pipelex build output my_bundle.mthds ``` **Generate JSON Schema for TypeScript/Zod integration:** ```bash -pipelex build output my_bundle.plx --format schema +pipelex build output my_bundle.mthds --format schema ``` **Specify which pipe to use from a bundle:** ```bash -pipelex build output my_bundle.plx --pipe my_pipe +pipelex build output my_bundle.mthds --pipe my_pipe ``` **Generate output for a pipe using a library directory:** @@ -51,7 +51,7 @@ pipelex build output my_domain.my_pipe -L ./my_library/ **Custom output path:** ```bash -pipelex build output my_bundle.plx --output expected_output.json +pipelex build output my_bundle.mthds --output expected_output.json ``` ## Output Formats diff --git a/docs/home/9-tools/cli/build/pipe.md b/docs/home/9-tools/cli/build/pipe.md index 3914d6382..9fc2679e6 100644 --- a/docs/home/9-tools/cli/build/pipe.md +++ b/docs/home/9-tools/cli/build/pipe.md @@ -6,7 +6,7 @@ !!! tip "Built with Pipelex" The Pipe Builder is itself a Pipelex pipeline! This showcases the power of Pipelex: a tool that builds pipelines... using a pipeline. -The Pipe Builder is an AI-powered tool that generates Pipelex pipelines from natural language descriptions. Describe what you want to achieve, and the builder translates your requirements into a working `.plx` file. +The Pipe Builder is an AI-powered tool that generates Pipelex pipelines from natural language descriptions. Describe what you want to achieve, and the builder translates your requirements into a working `.mthds` file. !!! info "Deep Dive" Want to understand how the Pipe Builder works under the hood? See [Pipe Builder Deep Dive](../../pipe-builder.md) for the full explanation of its multi-step generation process. 
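Once the builder has produced a bundle, you can execute it straight from Python as well as from the CLI. A minimal sketch, assuming the default `pipeline_01/bundle.mthds` output layout described below; the input name is illustrative, and the generated `run_{pipe_code}.py` script covers the same ground:

```python
import asyncio

from pipelex.pipeline.runner import PipelexRunner


async def run_built_bundle() -> None:
    # Point the runner at the generated bundle (path is illustrative)
    runner = PipelexRunner(bundle_uri="pipeline_01/bundle.mthds")
    # Fill the inputs from the generated inputs.json template
    response = await runner.execute_pipeline(inputs={"brief": "Extract data from invoices"})
    print(response.pipe_output)


asyncio.run(run_built_bundle())
```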
@@ -26,7 +26,7 @@ pipelex build pipe [OPTIONS] - `--output-name`, `-o` - Base name for the generated file or directory (without extension) - `--output-dir` - Directory where files will be generated - `--no-output` - Skip saving the pipeline to file (useful for testing) -- `--no-extras` - Skip generating `inputs.json` and `runner.py`, only generate the PLX file +- `--no-extras` - Skip generating `inputs.json` and `runner.py`, only generate the MTHDS file - `--builder-pipe` - Builder pipe to use for generating the pipeline (default: `pipe_builder`) - `--graph` / `--no-graph` - Generate execution graphs for both build process and built pipeline - `--graph-full-data` / `--graph-no-data` - Include or exclude full serialized data in graphs (requires `--graph`) @@ -37,7 +37,7 @@ The resulting pipeline will be saved in a folder (e.g., `pipeline_01/`) containi | File | Description | |------|-------------| -| `bundle.plx` | The pipeline definition | +| `bundle.mthds` | The pipeline definition | | `inputs.json` | Template for pipeline inputs | | `run_{pipe_code}.py` | Python script to run the pipeline | | `structures/` | Generated Pydantic models for your concepts | @@ -45,7 +45,7 @@ The resulting pipeline will be saved in a folder (e.g., `pipeline_01/`) containi | `bundle_view.svg` | SVG visualization of the build process and plan | | `__init__.py` | Python package init file | -The HTML and SVG files provide a visual representation of the resulting workflow. +The HTML and SVG files provide a visual representation of the resulting method. ## Examples @@ -67,7 +67,7 @@ pipelex build pipe "Extract data from invoices" -o invoice_extractor pipelex build pipe "Analyze customer feedback" --output-dir ./pipelines/ ``` -**Generate only the PLX file (no extras):** +**Generate only the MTHDS file (no extras):** ```bash pipelex build pipe "Summarize documents" --no-extras @@ -87,7 +87,7 @@ pipelex build pipe "Take a CV in a PDF file and a Job offer text, and analyze if pipelex build pipe "Extract structured data from invoice images" ``` -**Multi-step Workflows:** +**Multi-step Methods:** ```bash pipelex build pipe "Given an RFP PDF, build a compliance matrix" @@ -111,12 +111,12 @@ The Pipe Builder is in active development and currently: After generating your pipeline: -1. **Validate it**: `pipelex validate your_pipe.plx` - See [Validate Commands](../validate.md) -2. **Run it**: `pipelex run your_pipe.plx` - See [Run Command](../run.md) -3. **Generate a runner**: `pipelex build runner your_pipe.plx` - See [Build Runner](runner.md) +1. **Validate it**: `pipelex validate your_pipe.mthds` - See [Validate Commands](../validate.md) +2. **Run it**: `pipelex run your_pipe.mthds` - See [Run Command](../run.md) +3. **Generate a runner**: `pipelex build runner your_pipe.mthds` - See [Build Runner](runner.md) 4. **Generate structures**: `pipelex build structures ./` - See [Build Structures](structures.md) -5. **Generate input template**: `pipelex build inputs your_pipe.plx` - See [Build Inputs](inputs.md) -6. **View output structure**: `pipelex build output your_pipe.plx` - See [Build Output](output.md) +5. **Generate input template**: `pipelex build inputs your_pipe.mthds` - See [Build Inputs](inputs.md) +6. 
**View output structure**: `pipelex build output your_pipe.mthds` - See [Build Output](output.md) ## Related Documentation diff --git a/docs/home/9-tools/cli/build/runner.md b/docs/home/9-tools/cli/build/runner.md index fcede599d..0b99d2c03 100644 --- a/docs/home/9-tools/cli/build/runner.md +++ b/docs/home/9-tools/cli/build/runner.md @@ -10,11 +10,11 @@ pipelex build runner [OPTIONS] **Arguments:** -- `TARGET` - Bundle file path (`.plx`) +- `TARGET` - Bundle file path (`.mthds`) **Options:** -- `--pipe` - Pipe code to use (optional if the `.plx` declares a `main_pipe`) +- `--pipe` - Pipe code to use (optional if the `.mthds` declares a `main_pipe`) - `--output`, `-o` - Path to save the generated Python file (defaults to target's directory) - `--library-dirs`, `-L` - Directories to search for pipe definitions. Can be specified multiple times. @@ -23,25 +23,25 @@ pipelex build runner [OPTIONS] **Generate runner from a bundle (uses main_pipe):** ```bash -pipelex build runner my_bundle.plx +pipelex build runner my_bundle.mthds ``` **Specify which pipe to use from a bundle:** ```bash -pipelex build runner my_bundle.plx --pipe my_pipe +pipelex build runner my_bundle.mthds --pipe my_pipe ``` **With additional library directories:** ```bash -pipelex build runner my_bundle.plx -L ./shared_pipes/ -L ./common/ +pipelex build runner my_bundle.mthds -L ./shared_pipes/ -L ./common/ ``` **Custom output path:** ```bash -pipelex build runner my_bundle.plx --output custom_runner.py +pipelex build runner my_bundle.mthds --output custom_runner.py ``` ## What Gets Generated diff --git a/docs/home/9-tools/cli/build/structures.md b/docs/home/9-tools/cli/build/structures.md index dcb6611e8..60551cc20 100644 --- a/docs/home/9-tools/cli/build/structures.md +++ b/docs/home/9-tools/cli/build/structures.md @@ -10,7 +10,7 @@ pipelex build structures [OPTIONS] **Arguments:** -- `TARGET` - Either a library directory containing `.plx` files, or a specific `.plx` file +- `TARGET` - Either a library directory containing `.mthds` files, or a specific `.mthds` file **Options:** @@ -27,7 +27,7 @@ pipelex build structures ./my_pipelines/ **Generate structures from a specific bundle file:** ```bash -pipelex build structures ./my_pipeline/bundle.plx +pipelex build structures ./my_pipeline/bundle.mthds ``` **Generate structures to a specific output directory:** @@ -55,7 +55,7 @@ Now you have your structures as Python code: ## Example Output -For a concept defined in a `.plx` file like: +For a concept defined in a `.mthds` file like: ```toml [concept.CandidateProfile] diff --git a/docs/home/9-tools/cli/index.md b/docs/home/9-tools/cli/index.md index 485ebed79..de0a0bdcc 100644 --- a/docs/home/9-tools/cli/index.md +++ b/docs/home/9-tools/cli/index.md @@ -13,6 +13,7 @@ The Pipelex CLI is organized into several command groups: | [**show**](show.md) | Inspect configuration, pipes, and AI models | | [**run**](run.md) | Execute pipelines | | [**build**](build/index.md) | Generate pipelines, runners, and structures | +| [**pkg**](pkg.md) | Package management: initialize manifests, manage dependencies, and lock versions | ## Usage Tips @@ -23,8 +24,8 @@ The Pipelex CLI is organized into several command groups: 2. 
**Development Workflow** - - Write or generate pipelines in `.plx` files - - Validate with `pipelex validate your_pipe_code` or `pipelex validate your_bundle.plx` during development + - Write or generate pipelines in `.mthds` files + - Validate with `pipelex validate your_pipe_code` or `pipelex validate your_bundle.mthds` during development - Run `pipelex validate --all` before committing changes 3. **Running Pipelines** diff --git a/docs/home/9-tools/cli/pkg.md b/docs/home/9-tools/cli/pkg.md new file mode 100644 index 000000000..9a5d922ae --- /dev/null +++ b/docs/home/9-tools/cli/pkg.md @@ -0,0 +1,160 @@ +# Pkg Commands + +Manage package manifests and dependencies for your Pipelex project. + +## Pkg Init + +```bash +pipelex pkg init +pipelex pkg init --force +``` + +Scans `.mthds` files in the current directory, discovers domains and pipes, and generates a skeleton `METHODS.toml` manifest. + +The generated manifest includes: + +- A placeholder `address` (edit this to your actual package address) +- Version set to `0.1.0` +- All discovered domains listed in the `[exports]` section with their pipes + +**Options:** + +| Option | Description | +|--------|-------------| +| `--force`, `-f` | Overwrite an existing `METHODS.toml` | + +**Examples:** + +```bash +# Generate a manifest from .mthds files +pipelex pkg init + +# Overwrite an existing manifest +pipelex pkg init --force +``` + +!!! note + The command refuses to overwrite an existing `METHODS.toml` unless `--force` is specified. If no `.mthds` files are found in the current directory, the command exits with an error. + +## Pkg List + +```bash +pipelex pkg list +``` + +Finds the nearest `METHODS.toml` by walking up from the current directory and displays its contents in Rich-formatted tables: + +- **Package** — address, version, description, authors, license, MTHDS version +- **Dependencies** — alias, address, and version constraint for each dependency +- **Exports** — domain path and exported pipe names + +**Examples:** + +```bash +# Display the package manifest +pipelex pkg list +``` + +!!! note + If no `METHODS.toml` is found in the current directory or any parent directory (up to the `.git` boundary), the command exits with an error and suggests running `pipelex pkg init`. + +## Pkg Add + +```bash +pipelex pkg add ADDRESS [OPTIONS] +``` + +Adds a dependency entry to the `METHODS.toml` in the current directory. + +**Arguments:** + +| Argument | Required | Description | +|----------|----------|-------------| +| `ADDRESS` | Yes | Package address (e.g. `github.com/org/repo`) | + +**Options:** + +| Option | Default | Description | +|--------|---------|-------------| +| `--alias`, `-a` | Auto-derived | Dependency alias (snake_case) | +| `--version`, `-v` | `0.1.0` | Version constraint | +| `--path`, `-p` | — | Local filesystem path to the dependency | + +When no `--alias` is provided, the alias is automatically derived from the last segment of the address. For example, `github.com/acme/scoring-lib` produces the alias `scoring_lib` (hyphens and dots are replaced with underscores). + +**Examples:** + +```bash +# Add a remote dependency (alias auto-derived as "scoring_lib") +pipelex pkg add github.com/acme/scoring-lib --version "^2.0.0" + +# Add with an explicit alias +pipelex pkg add github.com/acme/scoring-lib --alias scoring --version "^2.0.0" + +# Add a local development dependency +pipelex pkg add github.com/acme/scoring-lib --version "2.0.0" --path "../scoring-lib" +``` + +!!!
note + A `METHODS.toml` must already exist in the current directory. Run `pipelex pkg init` first if needed. The command also checks that the alias is unique — duplicate aliases are rejected. + +## Pkg Lock + +```bash +pipelex pkg lock +``` + +Resolves all remote dependencies (including transitive ones) and generates a `methods.lock` file next to `METHODS.toml`. The lock file records the exact version, SHA-256 integrity hash, and source URL for each resolved package. + +Local path dependencies are skipped — they are resolved from the filesystem and do not appear in the lock file. + +**Examples:** + +```bash +# Resolve dependencies and write the lock file +pipelex pkg lock +``` + +!!! note "Commit to Version Control" + You should commit `methods.lock` to your repository so that every collaborator and CI run installs the exact same dependency versions. + +## Pkg Install + +```bash +pipelex pkg install +``` + +Reads the `methods.lock` file and fetches any packages not already present in the local cache (`~/.mthds/packages/`). After fetching, it verifies the SHA-256 integrity of all cached packages against the lock file. + +**Examples:** + +```bash +# Install dependencies from the lock file +pipelex pkg install +``` + +!!! note + A `methods.lock` file must exist. Run `pipelex pkg lock` first to generate one. If a cached package's hash does not match the lock file, the command fails with an integrity error. + +## Pkg Update + +```bash +pipelex pkg update +``` + +Performs a **fresh resolve** of all dependencies — the existing `methods.lock` is ignored. After resolving, it rewrites the lock file and displays a diff showing added, removed, and updated packages. + +**Examples:** + +```bash +# Re-resolve all dependencies and update the lock file +pipelex pkg update +``` + +!!! tip + Use `pkg update` after changing version constraints in `METHODS.toml`. For day-to-day reproducible installs, use `pkg install` instead. + +## Related Documentation + +- [Packages](../../6-build-reliable-ai-workflows/packages.md) — Package system concepts, dependency workflow, and manifest reference +- [Validate](validate.md) — Validating pipelines and configuration diff --git a/docs/home/9-tools/cli/run.md b/docs/home/9-tools/cli/run.md index 5e3ae8616..e15978c9b 100644 --- a/docs/home/9-tools/cli/run.md +++ b/docs/home/9-tools/cli/run.md @@ -8,11 +8,11 @@ Execute a pipeline with optional inputs and outputs. pipelex run [TARGET] [OPTIONS] ``` -Executes a pipeline, either from a standalone bundle (.plx) file or from your project's pipe library. +Executes a pipeline, either from a standalone bundle (.mthds) file or from your project's pipe library. **Arguments:** -- `TARGET` - Either a pipe code or a bundle file path, auto-detected according to presence of the .plx file extension +- `TARGET` - Either a pipe code or a bundle file path, auto-detected according to presence of the .mthds file extension **Options:** @@ -22,7 +22,7 @@ Executes a pipeline, either from a standalone bundle (.plx) file or from your pr - `--output`, `-o` - Path to save output JSON (defaults to `results/run_{pipe_code}.json`) - `--no-output` - Skip saving output to file - `--no-pretty-print` - Skip pretty printing the main output -- `--library-dir`, `-L` - Directory to search for pipe definitions (.plx files). Can be specified multiple times. +- `--library-dir`, `-L` - Directory to search for pipe definitions (.mthds files). Can be specified multiple times.
**Examples:** @@ -34,10 +34,10 @@ pipelex run hello_world pipelex run write_weekly_report --inputs weekly_report_data.json # Run a bundle file (uses its main_pipe) -pipelex run my_bundle.plx +pipelex run my_bundle.mthds # Run a specific pipe from a bundle -pipelex run my_bundle.plx --pipe extract_invoice +pipelex run my_bundle.mthds --pipe extract_invoice # Run with explicit options pipelex run --pipe hello_world --output my_output.json diff --git a/docs/home/9-tools/cli/show.md b/docs/home/9-tools/cli/show.md index 35a5819f3..1b27f3c53 100644 --- a/docs/home/9-tools/cli/show.md +++ b/docs/home/9-tools/cli/show.md @@ -38,7 +38,7 @@ pipelex show pipes This includes: - Internal Pipelex pipes (like the pipe builder) -- Pipes from your project's `.plx` files +- Pipes from your project's `.mthds` files - Pipes that are part of imported packages ## Show Pipe Definition diff --git a/docs/home/9-tools/cli/validate.md b/docs/home/9-tools/cli/validate.md index f710657cd..993131533 100644 --- a/docs/home/9-tools/cli/validate.md +++ b/docs/home/9-tools/cli/validate.md @@ -65,33 +65,33 @@ pipelex validate my_pipe -L ./pipelines ## Validate Bundle ```bash -pipelex validate BUNDLE_FILE.plx -pipelex validate --bundle BUNDLE_FILE.plx +pipelex validate BUNDLE_FILE.mthds +pipelex validate --bundle BUNDLE_FILE.mthds ``` -Validates all pipes defined in a bundle file. The command automatically detects `.plx` files as bundles. +Validates all pipes defined in a bundle file. The command automatically detects `.mthds` files as bundles. **Arguments:** -- `BUNDLE_FILE.plx` - Path to the bundle file (auto-detected by `.plx` extension) +- `BUNDLE_FILE.mthds` - Path to the bundle file (auto-detected by `.mthds` extension) **Options:** -- `--bundle BUNDLE_FILE.plx` - Explicitly specify the bundle file path +- `--bundle BUNDLE_FILE.mthds` - Explicitly specify the bundle file path - `--library-dir`, `-L` - Directory to search for additional pipe definitions. Can be specified multiple times. **Examples:** ```bash # Validate a bundle (auto-detected) -pipelex validate my_pipeline.plx -pipelex validate pipelines/invoice_processor.plx +pipelex validate my_pipeline.mthds +pipelex validate pipelines/invoice_processor.mthds # Validate a bundle (explicit option) -pipelex validate --bundle my_pipeline.plx +pipelex validate --bundle my_pipeline.mthds # Validate a bundle with additional library directories -pipelex validate my_bundle.plx -L ./shared_pipes +pipelex validate my_bundle.mthds -L ./shared_pipes ``` !!! note @@ -100,22 +100,22 @@ pipelex validate my_bundle.plx -L ./shared_pipes ## Validate Specific Pipe in Bundle ```bash -pipelex validate --bundle BUNDLE_FILE.plx --pipe PIPE_CODE +pipelex validate --bundle BUNDLE_FILE.mthds --pipe PIPE_CODE ``` Validates all pipes in a bundle, while ensuring a specific pipe exists in that bundle. The entire bundle is validated, not just the specified pipe. **Options:** -- `--bundle BUNDLE_FILE.plx` - Path to the bundle file +- `--bundle BUNDLE_FILE.mthds` - Path to the bundle file - `--pipe PIPE_CODE` - Pipe code that must exist in the bundle **Examples:** ```bash # Validate bundle and ensure specific pipe exists in it -pipelex validate --bundle my_pipeline.plx --pipe extract_invoice -pipelex validate --bundle invoice_processor.plx --pipe validate_amounts +pipelex validate --bundle my_pipeline.mthds --pipe extract_invoice +pipelex validate --bundle invoice_processor.mthds --pipe validate_amounts ``` !!! 
important "Bundle Validation Behavior" @@ -125,7 +125,7 @@ pipelex validate --bundle invoice_processor.plx --pipe validate_amounts All validation commands check: -- Syntax correctness of `.plx` files +- Syntax correctness of `.mthds` files - Concept and pipe definitions are valid - Input/output connections are correct - All referenced pipes and concepts exist diff --git a/docs/home/9-tools/pipe-builder.md b/docs/home/9-tools/pipe-builder.md index a0efc748c..f4e41eded 100644 --- a/docs/home/9-tools/pipe-builder.md +++ b/docs/home/9-tools/pipe-builder.md @@ -3,7 +3,7 @@ !!! warning "Beta Feature" The Pipe Builder is currently in beta and progressing fast. Expect frequent improvements and changes. -The Pipe Builder is an AI-powered tool that generates complete Pipelex pipelines from natural language descriptions. Describe what you want to achieve, and the builder creates a production-ready `.plx` file with concepts, pipes, and all the necessary structure. +The Pipe Builder is an AI-powered tool that generates complete Pipelex pipelines from natural language descriptions. Describe what you want to achieve, and the builder creates a production-ready `.mthds` file with concepts, pipes, and all the necessary structure. ## What It Does @@ -13,9 +13,9 @@ The Pipe Builder takes a brief description like: And generates: -- **Domain concepts** - Data structures for your workflow (e.g., `CVAnalysis`, `InterviewQuestion`) +- **Domain concepts** - Data structures for your method (e.g., `CVAnalysis`, `InterviewQuestion`) - **Pipe operators** - LLM calls, extractions, image generation steps -- **Pipe controllers** - Sequences, batches, parallels, conditions to orchestrate the flow +- **Pipe controllers** - Sequences, batches, parallel branches, conditions to orchestrate the flow - **A complete bundle** - Ready to validate and run ## How It Works @@ -89,7 +89,7 @@ Finally, everything is assembled into a complete Pipelex bundle: ## The Builder Pipeline -The Pipe Builder is defined in [`pipelex/builder/builder.plx`](https://github.com/Pipelex/pipelex/blob/main/pipelex/builder/builder.plx). The main orchestrator is a `PipeSequence` called `pipe_builder` that chains together: +The Pipe Builder is defined in [`pipelex/builder/builder.mthds`](https://github.com/Pipelex/pipelex/blob/main/pipelex/builder/builder.mthds). The main orchestrator is a `PipeSequence` called `pipe_builder` that chains together: ``` draft_the_plan → draft_the_concepts → structure_concepts → draft_flow → review_flow → design_pipe_signatures → write_bundle_header → detail_pipe_spec (batched) → assemble_pipelex_bundle_spec diff --git a/docs/home/9-tools/plxt.md b/docs/home/9-tools/plxt.md new file mode 100644 index 000000000..8326feb3c --- /dev/null +++ b/docs/home/9-tools/plxt.md @@ -0,0 +1,124 @@ +# plxt (Formatter & Linter) + +## Overview + +`plxt` is a fast formatting and linting tool for TOML, MTHDS, and PLX files in Pipelex projects. It ensures consistent style across all configuration and pipeline definition files, powered by the [taplo](https://taplo.tamasfe.dev/) engine. + +## Installation + +`plxt` is included as a Pipelex development dependency. It is automatically installed into your virtual environment when you run: + +```bash +make install +``` + +You can verify the installation with: + +```bash +.venv/bin/plxt --help +``` + +## Configuration + +`plxt` reads its configuration from `.pipelex/toml_config.toml` at the root of your project.
This file controls file discovery, formatting rules, and per-file-type overrides. + +### File Discovery + +The `include` and `exclude` top-level keys control which files `plxt` processes: + +```toml +include = ["**/*.toml", "**/*.mthds", "**/*.plx"] + +exclude = [ + ".venv/**", + ".mypy_cache/**", + ".ruff_cache/**", + ".pytest_cache/**", + "__pycache__/**", + "target/**", + "node_modules/**", + ".git/**", + "*.lock", +] +``` + +### Supported File Types + +| Extension | Description | +|-----------|-------------| +| `.toml` | Standard TOML configuration files | +| `.mthds` | Pipelex pipeline method definitions | +| `.plx` | Pipelex pipeline execution files | + +### Key Formatting Options + +The `[formatting]` section in `toml_config.toml` controls the global formatting behavior. Each option can be overridden per file type using `[[rule]]` sections. + +| Option | Default | Description | +|--------|---------|-------------| +| `align_entries` | `true` | Align consecutive `key = value` entries so `=` signs line up | +| `align_comments` | `true` | Align end-of-line comments on consecutive lines | +| `array_trailing_comma` | `true` | Add a trailing comma after the last element in multiline arrays | +| `array_auto_expand` | `true` | Expand arrays to multiple lines when exceeding `column_width` | +| `column_width` | `80` | Target maximum line width used for auto-expand/collapse | +| `compact_arrays` | `true` | Omit spaces inside single-line array brackets | +| `trailing_newline` | `true` | Ensure files end with a newline character | +| `reorder_keys` | `false` | Sort top-level keys alphabetically | + +For the full list of options, see the comments in `.pipelex/toml_config.toml` or the [taplo configuration reference](https://taplo.tamasfe.dev/configuration/). + +### Per-File-Type Rules + +You can define `[[rule]]` sections to apply different formatting settings to different file types. For example, the default configuration includes separate rules for `.toml` files and for `.mthds`/`.plx` files: + +```toml +[[rule]] +include = ["**/*.toml"] +[rule.formatting] +# TOML-specific overrides here + +[[rule]] +include = ["**/*.mthds", "**/*.plx"] +[rule.formatting] +align_entries = true +array_auto_collapse = true +# ... more MTHDS/PLX-specific overrides +``` + +## Usage + +### Command Line + +Format all discovered files in place: + +```bash +.venv/bin/plxt fmt +``` + +Check formatting without modifying files (useful for CI): + +```bash +.venv/bin/plxt fmt --check +``` + +Lint all discovered files: + +```bash +.venv/bin/plxt lint +``` + +### Make Targets + +The following Make targets are available for convenience: + +| Target | Description | +|--------|-------------| +| `make plxt-format` | Format all TOML/MTHDS/PLX files | +| `make plxt-lint` | Lint all TOML/MTHDS/PLX files | +| `make merge-check-plxt-format` | Check formatting without modifying files | +| `make merge-check-plxt-lint` | Run lint check | + +`plxt` is also included in the composite check targets: + +- `make c` (check) runs `plxt-format` and `plxt-lint` alongside ruff, pyright, and mypy +- `make agent-check` includes `plxt-format` and `plxt-lint` in the full quality pipeline diff --git a/docs/index.md b/docs/index.md index 130ba76b0..de022e08f 100644 --- a/docs/index.md +++ b/docs/index.md @@ -6,9 +6,9 @@ title: "What is Pipelex?" # What is Pipelex? -Pipelex is an open-source language that enables agents to build and run **repeatable AI workflows**. 
Instead of cramming everything into one complex prompt, you break tasks into focused steps, each pipe handling one clear transformation. +Pipelex is an open-source language that enables agents to build and run **repeatable AI methods**. Instead of cramming everything into one complex prompt, you break tasks into focused steps, each pipe handling one clear transformation. -Each pipe processes information using **Concepts** (typing with meaning) to ensure your pipelines make sense. The Pipelex language (`.plx` files) is simple and human-readable, even for non-technical users. +Each pipe processes information using **Concepts** (typing with meaning) to ensure your pipelines make sense. The Pipelex language (`.mthds` files) is simple and human-readable, even for non-technical users. Each step can be structured and validated, so you benefit from the reliability of software, and the intelligence of AI. @@ -16,20 +16,20 @@ Each step can be structured and validated, so you benefit from the reliability o ## Key Features -### 🔄 Repeatable AI Workflows -Build workflows that produce consistent, reliable results every time they run. +### 🔄 Repeatable AI Methods +Build methods that produce consistent, reliable results every time they run. ### 🧩 Concept-Driven Design Use semantic typing (Concepts) to ensure each step of your pipeline makes sense and connects logically. ### 📝 Human-Readable Language -Write workflows in `.plx` files that are easy to read, edit, and maintain—even for non-developers. +Write methods in `.mthds` files that are easy to read, edit, and maintain—even for non-developers. ### 🤖 AI-Assisted Development -Generate and iterate on workflows using natural language with your favorite AI coding assistant. +Generate and iterate on methods using natural language with your favorite AI coding assistant. ### 🔧 Production-Ready -Validate, test, and deploy AI workflows with the same confidence as traditional software. +Validate, test, and deploy AI methods with the same confidence as traditional software. --- @@ -46,7 +46,7 @@ Pipelex solves these problems by: - **Breaking down complexity** into focused, manageable steps - **Ensuring consistency** through structured validation -- **Enabling iteration** with clear, editable workflows +- **Enabling iteration** with clear, editable methods - **Facilitating collaboration** with human-readable syntax --- diff --git a/docs/under-the-hood/architecture-overview.md b/docs/under-the-hood/architecture-overview.md index fa78e43df..7919d22bb 100644 --- a/docs/under-the-hood/architecture-overview.md +++ b/docs/under-the-hood/architecture-overview.md @@ -4,7 +4,7 @@ title: "Architecture Overview" # Architecture Overview -Pipelex is a Python framework for building and running **repeatable AI workflows** using a declarative language (`.plx` files). +Pipelex is a Python framework for building and running **repeatable AI methods** using a declarative language (`.mthds` files).
--- @@ -51,7 +51,7 @@ Located in [`pipelex/core/`](https://github.com/Pipelex/pipelex/tree/main/pipele - **Concepts** - Semantic types with meaning (not just data types) - **Stuffs** - Knowledge objects combining a concept type with content - **Working Memory** - Runtime storage for data flowing through pipes -- **Bundles** - Complete pipeline definitions loaded from `.plx` files +- **Bundles** - Complete pipeline definitions loaded from `.mthds` files --- @@ -93,9 +93,9 @@ Each plugin translates Pipelex's unified interface into provider-specific API ca ```mermaid flowchart TB - subgraph PLX[".plx Pipeline Files"] + subgraph MTHDS[".mthds Pipeline Files"] direction LR - D1["Declarative workflow definitions"] + D1["Declarative method definitions"] end subgraph HL["HIGH-LEVEL: Business Logic"] @@ -145,7 +145,7 @@ flowchart TB A1["External Services"] end - PLX --> HL + MTHDS --> HL HL --> LL LL --> API ``` diff --git a/docs/under-the-hood/execution-graph-tracing.md b/docs/under-the-hood/execution-graph-tracing.md index 981d7e2f5..ba1482ec9 100644 --- a/docs/under-the-hood/execution-graph-tracing.md +++ b/docs/under-the-hood/execution-graph-tracing.md @@ -29,7 +29,7 @@ Pipe Execution β†’ GraphTracer β†’ GraphSpec β†’ Renderers β†’ HTML/Mermaid | Scenario | CLI | API | Result | |----------|-----|-----|--------| -| Generate execution graph | `pipelex run my_pipe --graph` | `execute_pipeline(..., execution_config.is_generate_graph=True)` | GraphSpec JSON + HTML viewers | +| Generate execution graph | `pipelex run my_pipe --graph` | `PipelexRunner(execution_config=...).execute_pipeline(...)` | GraphSpec JSON + HTML viewers | | Force include full data | `--graph --graph-full-data` | `data_inclusion.stuff_json_content=True` | Data embedded in IOSpec | | Force exclude data | `--graph --graph-no-data` | All `data_inclusion.*=False` | Previews only | | Dry run with graph | `--dry-run --graph` | `dry_run_pipe_with_graph(pipe)` | Graph of mock execution | @@ -60,14 +60,17 @@ pipelex run my_pipe --dry-run --graph --mock-inputs ### API ```python -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner from pipelex.pipe_run.dry_run_with_graph import dry_run_pipe_with_graph # Execute with graph tracing via config -result = await execute_pipeline( - pipe_code="my_pipe", +runner = PipelexRunner( execution_config=config.with_graph_config_overrides(generate_graph=True), ) +response = await runner.execute_pipeline( + pipe_code="my_pipe", +) +pipe_output = response.pipe_output # Dry run directly returns GraphSpec graph_spec = await dry_run_pipe_with_graph(pipe) diff --git a/docs/under-the-hood/index.md b/docs/under-the-hood/index.md index 4c850076e..e3027ec11 100644 --- a/docs/under-the-hood/index.md +++ b/docs/under-the-hood/index.md @@ -19,7 +19,7 @@ Welcome to the technical deep-dives of Pipelex. This section is for contributors - **Module Deep-Dives** - Detailed explanations of specific subsystems !!! info "Not Required for Using Pipelex" - You don't need to read this section to use Pipelex effectively. The [Home](../index.md) section covers everything you need to build workflows. + You don't need to read this section to use Pipelex effectively. The [Home](../index.md) section covers everything you need to build methods. 
--- diff --git a/docs/under-the-hood/reasoning-controls.md b/docs/under-the-hood/reasoning-controls.md index 904970c02..f465f8dd8 100644 --- a/docs/under-the-hood/reasoning-controls.md +++ b/docs/under-the-hood/reasoning-controls.md @@ -33,7 +33,7 @@ deep-analysis = { model = "@default-premium", temperature = 0.1, reasoning_effor ``` ```toml -# In a .plx file +# In a .mthds file [pipe.analyze_contract] type = "PipeLLM" model = "$deep-analysis" @@ -148,7 +148,7 @@ config: theme: base --- flowchart TB - A["LLMSetting
(PLX talent or API)"] -->|make_llm_job_params| B["LLMJobParams
reasoning_effort / reasoning_budget"] + A["LLMSetting
(MTHDS talent or API)"] -->|make_llm_job_params| B["LLMJobParams
reasoning_effort / reasoning_budget"] B --> C{Provider Worker} C -->|OpenAI Completions| D["_resolve_reasoning_effort()
-> effort string"] diff --git a/mkdocs.yml b/mkdocs.yml index d557c323d..982dcd27c 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,6 +1,6 @@ site_name: Pipelex Documentation site_url: https://docs.pipelex.com/ -site_description: "Official documentation for Pipelex, a framework and language for building deterministic, repeatable AI workflows and knowledge pipelines." +site_description: "Official documentation for Pipelex, an open-source library/CLI for building and running deterministic, repeatable AI methods." docs_dir: docs repo_url: "https://github.com/Pipelex/pipelex" repo_name: "Pipelex on GitHub" @@ -95,7 +95,7 @@ nav: - v0.18.0 "Chicago": home/1-releases/chicago.md - Get Started: - The Pipe Builder: home/2-get-started/pipe-builder.md - - Write Workflows Manually: home/2-get-started/write-workflows-manually.md + - Write Methods Manually: home/2-get-started/write-methods-manually.md - Understand Pipelex: - The Know-How Graph: home/3-understand-pipelex/viewpoint.md - The Pipelex Paradigm: home/3-understand-pipelex/pipelex-paradigm/index.md @@ -120,8 +120,8 @@ nav: - Gateway Available Models: home/5-setup/gateway-models.md - Project Organization: home/5-setup/project-organization.md - Telemetry: home/5-setup/telemetry.md - - Build Reliable AI Workflows: - - Kick off a Pipeline Project: home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md + - Build Reliable AI Methods: + - Kick off a Method Project: home/6-build-reliable-ai-workflows/kick-off-a-methods-project.md - Pipe Builder: home/6-build-reliable-ai-workflows/pipe-builder.md - Pipelex Bundle Specification: home/6-build-reliable-ai-workflows/pipelex-bundle-specification.md - Domain: home/6-build-reliable-ai-workflows/domain.md @@ -131,9 +131,10 @@ nav: - Python classes: home/6-build-reliable-ai-workflows/concepts/python-classes.md - Native Concepts: home/6-build-reliable-ai-workflows/concepts/native-concepts.md - Refining Concepts: home/6-build-reliable-ai-workflows/concepts/refining-concepts.md - - Design and Run Pipelines: + - Design and Run Methods: - Overview: home/6-build-reliable-ai-workflows/pipes/index.md - Libraries: home/6-build-reliable-ai-workflows/libraries.md + - Packages: home/6-build-reliable-ai-workflows/packages.md - Executing Pipelines: home/6-build-reliable-ai-workflows/pipes/executing-pipelines.md - Providing Inputs to Pipelines: home/6-build-reliable-ai-workflows/pipes/provide-inputs.md - Working Memory: home/6-build-reliable-ai-workflows/pipes/working-memory.md @@ -152,7 +153,7 @@ nav: - PipeParallel: home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeParallel.md - PipeBatch: home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeBatch.md - PipeCondition: home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeCondition.md - - Optimize Cost & Quality: home/6-build-reliable-ai-workflows/configure-ai-llm-to-optimize-workflows.md + - Optimize Cost & Quality: home/6-build-reliable-ai-workflows/configure-ai-llm-to-optimize-methods.md - LLM Structured Generation: home/6-build-reliable-ai-workflows/llm-structured-generation-config.md - LLM Prompting Style: home/6-build-reliable-ai-workflows/adapt-to-llm-prompting-style-openai-anthropic-mistral.md - Configuration (TOML reference): @@ -179,6 +180,7 @@ nav: - Validate: home/9-tools/cli/validate.md - Run: home/9-tools/cli/run.md - Show: home/9-tools/cli/show.md + - Pkg: home/9-tools/cli/pkg.md - Build: - Overview: home/9-tools/cli/build/index.md - Pipe: home/9-tools/cli/build/pipe.md @@ -187,6 +189,7 @@ nav: - Inputs: 
home/9-tools/cli/build/inputs.md - Output: home/9-tools/cli/build/output.md - Pipe Builder: home/9-tools/pipe-builder.md + - plxt (Formatter & Linter): home/9-tools/plxt.md - Logging: home/9-tools/logging.md - Advanced Customizations: - Overview: home/10-advanced-customizations/index.md diff --git a/pipelex/builder/CLAUDE.md b/pipelex/builder/CLAUDE.md index 211b99e5d..2e59176f1 100644 --- a/pipelex/builder/CLAUDE.md +++ b/pipelex/builder/CLAUDE.md @@ -1,11 +1,11 @@ # Builder -Transforms high-level specifications into valid, executable Pipelex pipeline bundles (`.plx` files). The builder is a spec-to-PLX compiler with built-in iterative repair. +Transforms high-level specifications into valid, executable Pipelex pipeline bundles (`.mthds` files). The builder is a spec-to-MTHDS compiler with built-in iterative repair. ## Core Flow ``` -PipelexBundleSpec β†’ to_blueprint() β†’ PipelexBundleBlueprint β†’ PLX file +PipelexBundleSpec β†’ to_blueprint() β†’ PipelexBundleBlueprint β†’ MTHDS file ↑ | | validate_bundle() | | @@ -21,7 +21,7 @@ builder.py # reconstruct_bundle_with_pipe_fixes() helper builder_loop.py # BuilderLoop β€” the main orchestration class builder_errors.py # Error types exceptions.py # Exception types -conventions.py # File naming defaults (bundle.plx, inputs.json) +conventions.py # File naming defaults (bundle.mthds, inputs.json) bundle_spec.py # PipelexBundleSpec β€” top-level spec model bundle_header_spec.py # Bundle header info runner_code.py # Code generation utilities @@ -91,9 +91,9 @@ The `build` command in `pipelex/cli/agent_cli/commands/build_cmd.py` calls `buil 1. Runs a "builder pipe" (itself a Pipelex pipeline) that generates a `PipelexBundleSpec` 2. Passes it to `BuilderLoop.build_and_fix()` -3. Converts the result to PLX via `PlxFactory.make_plx_content()` +3. Converts the result to MTHDS via `MthdsFactory.make_mthds_content()` 4. Saves to `pipelex-wip/` with incremental naming ## Talent System -Talents are abstract capability labels mapped to concrete model presets. Each talent enum (in `talents/`) maps to a `$preset` code used in PLX files. When modifying talents, update both the enum and its preset mapping dict. +Talents are abstract capability labels mapped to concrete model presets. Each talent enum (in `talents/`) maps to a `$preset` code used in MTHDS files. When modifying talents, update both the enum and its preset mapping dict. diff --git a/pipelex/builder/agentic_builder.mthds b/pipelex/builder/agentic_builder.mthds new file mode 100644 index 000000000..8375f04f0 --- /dev/null +++ b/pipelex/builder/agentic_builder.mthds @@ -0,0 +1,29 @@ +domain = "agentic_builder" +description = "Agent-focused builder sequences for structured generation. Assumes drafts are pre-generated." + +# No concepts defined - reuses from builder and pipe_design domains + +[pipe] + +# PipeBatch to detail all pipe specs from signatures +[pipe.detail_all_pipe_specs] +type = "PipeBatch" +description = "Generate detailed specs for all pipe signatures by batching over them." +inputs = { plan_draft = "builder.PlanDraft", pipe_signatures = "pipe_design.PipeSignature[]", concept_specs = "builder.ConceptSpec[]" } +output = "pipe_design.PipeSpec[]" +input_list_name = "pipe_signatures" +input_item_name = "pipe_signature" +branch_pipe_code = "pipe_design.detail_pipe_spec" + +# Main agent builder: from flow to bundle (skips all drafting) +[pipe.build_from_flow] +type = "PipeSequence" +description = "Build a complete PipelexBundleSpec from pre-generated flow and concepts. 
This is the main entry point for agent-driven building." +inputs = { brief = "builder.UserBrief", plan_draft = "builder.PlanDraft", prepared_flow = "builder.FlowDraft", concept_specs = "builder.ConceptSpec[]" } +output = "builder.PipelexBundleSpec" +steps = [ + { pipe = "builder.design_pipe_signatures", result = "pipe_signatures" }, + { pipe = "builder.write_bundle_header", result = "bundle_header_spec" }, + { pipe = "detail_all_pipe_specs", result = "pipe_specs" }, + { pipe = "builder.assemble_pipelex_bundle_spec", result = "pipelex_bundle_spec" }, +] diff --git a/pipelex/builder/agentic_builder.plx b/pipelex/builder/agentic_builder.plx deleted file mode 100644 index bba0bf1fb..000000000 --- a/pipelex/builder/agentic_builder.plx +++ /dev/null @@ -1,29 +0,0 @@ -domain = "agentic_builder" -description = "Agent-focused builder sequences for structured generation. Assumes drafts are pre-generated." - -# No concepts defined - reuses from builder and pipe_design domains - -[pipe] - -# PipeBatch to detail all pipe specs from signatures -[pipe.detail_all_pipe_specs] -type = "PipeBatch" -description = "Generate detailed specs for all pipe signatures by batching over them." -inputs = { plan_draft = "builder.PlanDraft", pipe_signatures = "pipe_design.PipeSignature[]", concept_specs = "builder.ConceptSpec[]" } -output = "pipe_design.PipeSpec[]" -input_list_name = "pipe_signatures" -input_item_name = "pipe_signature" -branch_pipe_code = "detail_pipe_spec" - -# Main agent builder: from flow to bundle (skips all drafting) -[pipe.build_from_flow] -type = "PipeSequence" -description = "Build a complete PipelexBundleSpec from pre-generated flow and concepts. This is the main entry point for agent-driven building." -inputs = { brief = "builder.UserBrief", plan_draft = "builder.PlanDraft", prepared_flow = "builder.FlowDraft", concept_specs = "builder.ConceptSpec[]" } -output = "builder.PipelexBundleSpec" -steps = [ - { pipe = "design_pipe_signatures", result = "pipe_signatures" }, - { pipe = "write_bundle_header", result = "bundle_header_spec" }, - { pipe = "detail_all_pipe_specs", result = "pipe_specs" }, - { pipe = "assemble_pipelex_bundle_spec", result = "pipelex_bundle_spec" } -] diff --git a/pipelex/builder/builder.plx b/pipelex/builder/builder.mthds similarity index 92% rename from pipelex/builder/builder.plx rename to pipelex/builder/builder.mthds index aba5d53ab..852b4a96b 100644 --- a/pipelex/builder/builder.plx +++ b/pipelex/builder/builder.mthds @@ -1,17 +1,17 @@ -domain = "builder" +domain = "builder" description = "Auto-generate a Pipelex bundle (concepts + pipes) from a short user brief." [concept] -UserBrief = "A short, natural-language description of what the user wants." -PlanDraft = "Natural-language pipeline plan text describing sequences, inputs, outputs." -ConceptDrafts = "Textual draft of the concepts to create." +UserBrief = "A short, natural-language description of what the user wants." +PlanDraft = "Natural-language pipeline plan text describing sequences, inputs, outputs." +ConceptDrafts = "Textual draft of the concepts to create." PipelexBundleSpec = "A Pipelex bundle spec." -BundleHeaderSpec = "A domain information object." -FlowDraft = "Draft of the flow of the pipeline." +BundleHeaderSpec = "A domain information object." +FlowDraft = "Draft of the flow of the pipeline." ## Concepts ConceptStructureSpec = "A concept spec with structure but without full implementation." 
-ConceptSpec = "A specification for a concept including its code, description, and a structure draft as plain text." +ConceptSpec = "A specification for a concept including its code, description, and a structure draft as plain text." [pipe] @@ -21,15 +21,15 @@ description = "This pipe is going to be the entry point for the builder. It will inputs = { brief = "UserBrief" } output = "PipelexBundleSpec" steps = [ - { pipe = "draft_the_plan", result = "plan_draft" }, - { pipe = "draft_the_concepts", result = "concept_drafts" }, - { pipe = "structure_concepts", result = "concept_specs" }, - { pipe = "draft_flow", result = "flow_draft" }, - { pipe = "review_flow", result = "prepared_flow" }, - { pipe = "design_pipe_signatures", result = "pipe_signatures" }, - { pipe = "write_bundle_header", result = "bundle_header_spec" }, - { pipe = "detail_pipe_spec", batch_over = "pipe_signatures", batch_as = "pipe_signature", result = "pipe_specs" }, - { pipe = "assemble_pipelex_bundle_spec", result = "pipelex_bundle_spec" } + { pipe = "draft_the_plan", result = "plan_draft" }, + { pipe = "draft_the_concepts", result = "concept_drafts" }, + { pipe = "structure_concepts", result = "concept_specs" }, + { pipe = "draft_flow", result = "flow_draft" }, + { pipe = "review_flow", result = "prepared_flow" }, + { pipe = "design_pipe_signatures", result = "pipe_signatures" }, + { pipe = "write_bundle_header", result = "bundle_header_spec" }, + { pipe = "pipe_design.detail_pipe_spec", batch_over = "pipe_signatures", batch_as = "pipe_signature", result = "pipe_specs" }, + { pipe = "assemble_pipelex_bundle_spec", result = "pipelex_bundle_spec" }, ] [pipe.draft_the_plan] @@ -222,7 +222,7 @@ Shape of the contract for PipeOperator is: - steps: List of sub-pipes to execute sequentially. Each step has: pipe (name of the pipe to execute), result (variable name). **PipeParallel:** -- parallels: List of sub-pipes to execute concurrently. +- branches: List of sub-pipes to execute concurrently. - add_each_output: Boolean - include individual outputs in combined result. - combined_output: Optional ConceptCode (PascalCase) for combined structure. @@ -365,15 +365,15 @@ The main pipe is the one that will carry out the main task of the pipeline, it s """ [pipe.assemble_pipelex_bundle_spec] -type = "PipeCompose" +type = "PipeCompose" description = "Compile the pipelex bundle spec." 
-inputs = { pipe_specs = "pipe_design.PipeSpec[]", concept_specs = "ConceptSpec[]", bundle_header_spec = "BundleHeaderSpec" }
-output = "PipelexBundleSpec"
+inputs = { pipe_specs = "pipe_design.PipeSpec[]", concept_specs = "ConceptSpec[]", bundle_header_spec = "BundleHeaderSpec" }
+output = "PipelexBundleSpec"

[pipe.assemble_pipelex_bundle_spec.construct]
-domain = { from = "bundle_header_spec.domain_code" }
-description = { from = "bundle_header_spec.description" }
+domain = { from = "bundle_header_spec.domain_code" }
+description = { from = "bundle_header_spec.description" }
system_prompt = { from = "bundle_header_spec.system_prompt" }
-main_pipe = { from = "bundle_header_spec.main_pipe" }
-concept = { from = "concept_specs", list_to_dict_keyed_by = "the_concept_code" }
-pipe = { from = "pipe_specs", list_to_dict_keyed_by = "pipe_code" }
+main_pipe = { from = "bundle_header_spec.main_pipe" }
+concept = { from = "concept_specs", list_to_dict_keyed_by = "the_concept_code" }
+pipe = { from = "pipe_specs", list_to_dict_keyed_by = "pipe_code" }
diff --git a/pipelex/builder/builder_loop.py b/pipelex/builder/builder_loop.py
index e3586c50d..02e8c7fec 100644
--- a/pipelex/builder/builder_loop.py
+++ b/pipelex/builder/builder_loop.py
@@ -2,6 +2,8 @@
 from pathlib import Path
 from typing import TYPE_CHECKING, cast

+from mthds.models.pipeline_inputs import PipelineInputs
+
 from pipelex import builder, log
 from pipelex.builder.builder import (
     PipelexBundleSpec,
@@ -16,17 +18,21 @@ from pipelex.builder.pipe.pipe_condition_spec import PipeConditionSpec
 from pipelex.builder.pipe.pipe_parallel_spec import PipeParallelSpec
 from pipelex.builder.pipe.pipe_sequence_spec import PipeSequenceSpec
-from pipelex.client.protocol import PipelineInputs
 from pipelex.config import get_config
 from pipelex.core.concepts.native.concept_native import NativeConceptCode
+from pipelex.core.packages.bundle_scanner import build_domain_exports_from_scan, scan_bundles_for_domain_info
+from pipelex.core.packages.discovery import MANIFEST_FILENAME
+from pipelex.core.packages.manifest import MthdsPackageManifest
+from pipelex.core.packages.manifest_parser import serialize_manifest_to_toml
 from pipelex.core.pipes.exceptions import PipeFactoryErrorType, PipeValidationErrorType
 from pipelex.core.pipes.pipe_blueprint import PipeCategory
 from pipelex.core.pipes.variable_multiplicity import format_concept_with_multiplicity, parse_concept_with_multiplicity
+from pipelex.core.qualified_ref import QualifiedRef
 from pipelex.graph.graphspec import GraphSpec
 from pipelex.hub import get_required_pipe
-from pipelex.language.plx_factory import PlxFactory
+from pipelex.language.mthds_factory import MthdsFactory
 from pipelex.pipe_controllers.condition.special_outcome import SpecialOutcome
-from pipelex.pipeline.execute import execute_pipeline
+from pipelex.pipeline.runner import PipelexRunner
 from pipelex.pipeline.validate_bundle import ValidateBundleError, validate_bundle
 from pipelex.system.configuration.configs import PipelineExecutionConfig
 from pipelex.tools.misc.file_utils import get_incremental_file_path, save_text_to_path
@@ -49,12 +55,15 @@ async def build_and_fix(
        output_dir: str | None = None,
    ) -> tuple[PipelexBundleSpec, GraphSpec | None]:
        # TODO: It doesn't make sense to accept a builder_pipe code while hardcoding the path to the builder pipe.
- pipe_output = await execute_pipeline( - pipe_code=builder_pipe, + runner = PipelexRunner( library_dirs=[str(Path(builder.__file__).parent)], - inputs=inputs, execution_config=execution_config, ) + response = await runner.execute_pipeline( + pipe_code=builder_pipe, + inputs=inputs, + ) + pipe_output = response.pipe_output if is_save_working_memory_enabled: working_memory_path = get_incremental_file_path( @@ -69,15 +78,15 @@ async def build_and_fix( if is_save_first_iteration_enabled: try: - plx_content = PlxFactory.make_plx_content(blueprint=pipelex_bundle_spec.to_blueprint()) + mthds_content = MthdsFactory.make_mthds_content(blueprint=pipelex_bundle_spec.to_blueprint()) first_iteration_path = get_incremental_file_path( base_path=output_dir or "results/pipe-builder", base_name="generated_pipeline_1st_iteration", - extension="plx", + extension="mthds", ) - save_text_to_path(text=plx_content, path=str(first_iteration_path), create_directory=True) + save_text_to_path(text=mthds_content, path=str(first_iteration_path), create_directory=True) except PipelexBundleSpecBlueprintError as exc: - log.warning(f"Could not save first iteration PLX: {exc}") + log.warning(f"Could not save first iteration MTHDS: {exc}") max_attempts = get_config().pipelex.builder_config.fix_loop_max_attempts for attempt in range(1, max_attempts + 1): @@ -130,29 +139,32 @@ async def _fix_undeclared_concept_references( if pipelex_bundle_spec.pipe: for pipe_code, pipe_spec in pipelex_bundle_spec.pipe.items(): source = f"pipe '{pipe_code}'" - # Parse output + # Parse output β€” skip cross-package refs output_parse = parse_concept_with_multiplicity(pipe_spec.output) output_concept = output_parse.concept_ref_or_code - if "." not in output_concept or output_concept.split(".")[0] == pipelex_bundle_spec.domain: - bare_code = output_concept.split(".")[-1] if "." in output_concept else output_concept - concept_references.append((bare_code, source, "output")) + if not QualifiedRef.has_cross_package_prefix(output_concept): + if "." not in output_concept or output_concept.split(".")[0] == pipelex_bundle_spec.domain: + bare_code = output_concept.split(".")[-1] if "." in output_concept else output_concept + concept_references.append((bare_code, source, "output")) - # Parse inputs + # Parse inputs β€” skip cross-package refs if pipe_spec.inputs: for input_name, input_concept_str in pipe_spec.inputs.items(): input_parse = parse_concept_with_multiplicity(input_concept_str) input_concept = input_parse.concept_ref_or_code - if "." not in input_concept or input_concept.split(".")[0] == pipelex_bundle_spec.domain: - bare_code = input_concept.split(".")[-1] if "." in input_concept else input_concept - concept_references.append((bare_code, source, f"input '{input_name}'")) + if not QualifiedRef.has_cross_package_prefix(input_concept): + if "." not in input_concept or input_concept.split(".")[0] == pipelex_bundle_spec.domain: + bare_code = input_concept.split(".")[-1] if "." in input_concept else input_concept + concept_references.append((bare_code, source, f"input '{input_name}'")) - # Parse PipeParallel combined_output + # Parse PipeParallel combined_output β€” skip cross-package refs if isinstance(pipe_spec, PipeParallelSpec) and pipe_spec.combined_output: combined_parse = parse_concept_with_multiplicity(pipe_spec.combined_output) combined_concept = combined_parse.concept_ref_or_code - if "." not in combined_concept or combined_concept.split(".")[0] == pipelex_bundle_spec.domain: - bare_code = combined_concept.split(".")[-1] if "." 
in combined_concept else combined_concept - concept_references.append((bare_code, source, "combined_output")) + if not QualifiedRef.has_cross_package_prefix(combined_concept): + if "." not in combined_concept or combined_concept.split(".")[0] == pipelex_bundle_spec.domain: + bare_code = combined_concept.split(".")[-1] if "." in combined_concept else combined_concept + concept_references.append((bare_code, source, "combined_output")) # Collect concept references from concept definitions (refines, structure concept_ref, item_concept_ref) if pipelex_bundle_spec.concept: @@ -161,26 +173,28 @@ async def _fix_undeclared_concept_references( continue source = f"concept '{concept_code}'" - # Check refines + # Check refines β€” skip cross-package refs if concept_spec_or_name.refines: ref = concept_spec_or_name.refines - if "." not in ref or ref.split(".")[0] == pipelex_bundle_spec.domain: + if not QualifiedRef.has_cross_package_prefix(ref) and ("." not in ref or ref.split(".")[0] == pipelex_bundle_spec.domain): bare_code = ref.split(".")[-1] if "." in ref else ref concept_references.append((bare_code, source, "refines")) - # Check structure fields + # Check structure fields β€” skip cross-package refs if concept_spec_or_name.structure: for field_name, field_spec in concept_spec_or_name.structure.items(): if field_spec.concept_ref: ref = field_spec.concept_ref - if "." not in ref or ref.split(".")[0] == pipelex_bundle_spec.domain: - bare_code = ref.split(".")[-1] if "." in ref else ref - concept_references.append((bare_code, source, f"structure.{field_name}.concept_ref")) + if not QualifiedRef.has_cross_package_prefix(ref): + if "." not in ref or ref.split(".")[0] == pipelex_bundle_spec.domain: + bare_code = ref.split(".")[-1] if "." in ref else ref + concept_references.append((bare_code, source, f"structure.{field_name}.concept_ref")) if field_spec.item_concept_ref: ref = field_spec.item_concept_ref - if "." not in ref or ref.split(".")[0] == pipelex_bundle_spec.domain: - bare_code = ref.split(".")[-1] if "." in ref else ref - concept_references.append((bare_code, source, f"structure.{field_name}.item_concept_ref")) + if not QualifiedRef.has_cross_package_prefix(ref): + if "." not in ref or ref.split(".")[0] == pipelex_bundle_spec.domain: + bare_code = ref.split(".")[-1] if "." 
in ref else ref + concept_references.append((bare_code, source, f"structure.{field_name}.item_concept_ref")) # Step 2: Determine which are undeclared declared_concepts: set[str] = set() @@ -255,11 +269,14 @@ async def _fix_undeclared_concept_references( undeclared_concepts = "\n".join(lines) log.info(f"πŸ€– Generating ConceptSpec definitions for {len(undeclared)} undeclared concept(s) via LLM...") - concept_fixer_output = await execute_pipeline( - pipe_code="generate_missing_concepts", + concept_fixer_runner = PipelexRunner( library_dirs=[str(Path(builder.__file__).parent / "concept")], + ) + concept_fixer_response = await concept_fixer_runner.execute_pipeline( + pipe_code="generate_missing_concepts", inputs={"undeclared_concepts": undeclared_concepts}, ) + concept_fixer_output = concept_fixer_response.pipe_output generated_concepts_list = concept_fixer_output.working_memory.get_stuff_as_list( name="generate_missing_concepts", @@ -313,7 +330,7 @@ def _prune_unreachable_specs(self, pipelex_bundle_spec: PipelexBundleSpec) -> Pi if isinstance(pipe_spec, PipeSequenceSpec): sub_pipe_codes = [step.pipe_code for step in pipe_spec.steps] elif isinstance(pipe_spec, PipeParallelSpec): - sub_pipe_codes = [parallel.pipe_code for parallel in pipe_spec.parallels] + sub_pipe_codes = [branch.pipe_code for branch in pipe_spec.branches] elif isinstance(pipe_spec, PipeBatchSpec): sub_pipe_codes = [pipe_spec.branch_pipe_code] elif isinstance(pipe_spec, PipeConditionSpec): @@ -368,15 +385,18 @@ def _extract_local_bare_code(concept_ref_or_code: str, domain: str) -> str | Non """Extract a bare concept code only if the reference is local. A reference is considered local if it has no domain prefix or if - its domain prefix matches the bundle domain. + its domain prefix matches the bundle domain. Cross-package refs + (containing '->') are never local. Args: concept_ref_or_code: A concept reference like "Document", "my_domain.Document", or "external.Document" domain: The bundle's domain Returns: - The bare concept code if local, or None if external + The bare concept code if local, or None if external or cross-package """ + if QualifiedRef.has_cross_package_prefix(concept_ref_or_code): + return None if "." not in concept_ref_or_code: return concept_ref_or_code prefix, bare_code = concept_ref_or_code.rsplit(".", maxsplit=1) @@ -693,15 +713,15 @@ def _fix_bundle_validation_error( # Save second iteration if we made any changes (pipes or concepts) if (fixed_pipes or added_concepts) and is_save_second_iteration_enabled: try: - plx_content = PlxFactory.make_plx_content(blueprint=pipelex_bundle_spec.to_blueprint()) + mthds_content = MthdsFactory.make_mthds_content(blueprint=pipelex_bundle_spec.to_blueprint()) second_iteration_path = get_incremental_file_path( base_path=output_dir or "results/pipe-builder", base_name="generated_pipeline_2nd_iteration", - extension="plx", + extension="mthds", ) - save_text_to_path(text=plx_content, path=str(second_iteration_path)) + save_text_to_path(text=mthds_content, path=str(second_iteration_path)) except PipelexBundleSpecBlueprintError as exc: - log.warning(f"Could not save second iteration PLX: {exc}") + log.warning(f"Could not save second iteration MTHDS: {exc}") return pipelex_bundle_spec @@ -910,3 +930,47 @@ def _fix_concept_field_to_list( field_spec.choices = None return True + + +def maybe_generate_manifest_for_output(output_dir: Path) -> Path | None: + """Generate a METHODS.toml if the output directory contains multiple domains. 
+ + Scans all .mthds files in the output directory, parses their headers to + extract domain and main_pipe information, and generates a METHODS.toml + if multiple distinct domains are found. + + Args: + output_dir: Directory to scan for .mthds files + + Returns: + Path to the generated METHODS.toml, or None if not generated + """ + mthds_files = sorted(output_dir.rglob("*.mthds")) + if not mthds_files: + return None + + # Parse each bundle to extract domain and pipe info + domain_pipes, domain_main_pipes, _blueprints, errors = scan_bundles_for_domain_info(mthds_files) + for error in errors: + log.warning(f"Could not parse {error}") + + # Only generate manifest when multiple domains are present + if len(domain_pipes) < 2: + return None + + # Build exports: include main_pipe and all pipes from each domain + exports = build_domain_exports_from_scan(domain_pipes, domain_main_pipes) + + dir_name = output_dir.name.replace("-", "_").replace(" ", "_").lower() + manifest = MthdsPackageManifest( + address=f"example.com/yourorg/{dir_name}", + version="0.1.0", + description=f"Package generated from {len(mthds_files)} .mthds file(s)", + exports=exports, + ) + + manifest_path = output_dir / MANIFEST_FILENAME + toml_content = serialize_manifest_to_toml(manifest) + manifest_path.write_text(toml_content, encoding="utf-8") + + return manifest_path diff --git a/pipelex/builder/bundle_spec.py b/pipelex/builder/bundle_spec.py index d3103e838..4dc565355 100644 --- a/pipelex/builder/bundle_spec.py +++ b/pipelex/builder/bundle_spec.py @@ -22,7 +22,7 @@ class PipelexBundleSpec(StructuredContent): Represents the top-level structure of a Pipelex bundle, which defines a domain with its concepts, pipes, and configuration. Bundles are the primary unit of - organization for Pipelex workflows, loaded from TOML files. + organization for Pipelex methods, loaded from TOML files. Attributes: domain: The domain identifier for this bundle in snake_case format. diff --git a/pipelex/builder/concept/concept_fixer.plx b/pipelex/builder/concept/concept_fixer.mthds similarity index 98% rename from pipelex/builder/concept/concept_fixer.plx rename to pipelex/builder/concept/concept_fixer.mthds index 46f8ee655..c8a176b94 100644 --- a/pipelex/builder/concept/concept_fixer.plx +++ b/pipelex/builder/concept/concept_fixer.mthds @@ -1,4 +1,4 @@ -domain = "concept_fixer" +domain = "concept_fixer" description = "Generate ConceptSpec definitions for missing concepts referenced in a pipeline." [concept] diff --git a/pipelex/builder/conventions.py b/pipelex/builder/conventions.py index 481150108..01ad111fd 100644 --- a/pipelex/builder/conventions.py +++ b/pipelex/builder/conventions.py @@ -4,5 +4,7 @@ and expected by the runner when auto-detecting from a directory. """ -DEFAULT_BUNDLE_FILE_NAME = "bundle.plx" +from pipelex.core.interpreter.helpers import MTHDS_EXTENSION + +DEFAULT_BUNDLE_FILE_NAME = f"bundle{MTHDS_EXTENSION}" DEFAULT_INPUTS_FILE_NAME = "inputs.json" diff --git a/pipelex/builder/pipe/pipe_design.plx b/pipelex/builder/pipe/pipe_design.mthds similarity index 94% rename from pipelex/builder/pipe/pipe_design.plx rename to pipelex/builder/pipe/pipe_design.mthds index a2ec89d7b..b88e0ba91 100644 --- a/pipelex/builder/pipe/pipe_design.plx +++ b/pipelex/builder/pipe/pipe_design.mthds @@ -2,28 +2,28 @@ domain = "pipe_design" [concept] PipeSignature = "A pipe contract which says what the pipe does, not how it does it: code (the pipe code in snake_case), type, description, inputs, output." -PipeSpec = "A structured spec for a pipe (union)." 
+PipeSpec = "A structured spec for a pipe (union)." # Pipe controllers -PipeBatchSpec = "A structured spec for a PipeBatch." +PipeBatchSpec = "A structured spec for a PipeBatch." PipeConditionSpec = "A structured spec for a PipeCondition." -PipeParallelSpec = "A structured spec for a PipeParallel." -PipeSequenceSpec = "A structured spec for a PipeSequence." +PipeParallelSpec = "A structured spec for a PipeParallel." +PipeSequenceSpec = "A structured spec for a PipeSequence." # Pipe operators -PipeFuncSpec = "A structured spec for a PipeFunc." -PipeImgGenSpec = "A structured spec for a PipeImgGen." +PipeFuncSpec = "A structured spec for a PipeFunc." +PipeImgGenSpec = "A structured spec for a PipeImgGen." PipeComposeSpec = "A structured spec for a pipe jinja2." -PipeLLMSpec = "A structured spec for a PipeLLM." +PipeLLMSpec = "A structured spec for a PipeLLM." PipeExtractSpec = "A structured spec for a PipeExtract." -PipeFailure = "Details of a single pipe failure during dry run." +PipeFailure = "Details of a single pipe failure during dry run." [pipe] [pipe.detail_pipe_spec] -type = "PipeCondition" -description = "Route by signature.type to the correct spec emitter." -inputs = { plan_draft = "builder.PlanDraft", pipe_signature = "PipeSignature", concept_specs = "builder.ConceptSpec[]" } -output = "Anything" -expression = "pipe_signature.type" +type = "PipeCondition" +description = "Route by signature.type to the correct spec emitter." +inputs = { plan_draft = "builder.PlanDraft", pipe_signature = "PipeSignature", concept_specs = "builder.ConceptSpec[]" } +output = "Anything" +expression = "pipe_signature.type" default_outcome = "fail" [pipe.detail_pipe_spec.outcomes] @@ -278,4 +278,4 @@ CORRECT - template for string composition (prefix + field): code = { template = "INV-($ + the_order.id)" } @pipe_signature -""" \ No newline at end of file +""" diff --git a/pipelex/builder/pipe/pipe_parallel_spec.py b/pipelex/builder/pipe/pipe_parallel_spec.py index 216689cd6..6890bc16d 100644 --- a/pipelex/builder/pipe/pipe_parallel_spec.py +++ b/pipelex/builder/pipe/pipe_parallel_spec.py @@ -23,16 +23,16 @@ class PipeParallelSpec(PipeSpec): and their outputs can be combined or kept separate. Validation Rules: - 1. Parallels list must not be empty. - 2. Each parallel step must be a valid SubPipeSpec. + 1. Branches list must not be empty. + 2. Each branch must be a valid SubPipeSpec. 3. combined_output, when specified, must be a valid ConceptCode in PascalCase. - 4. Pipe codes in parallels must reference existing pipes (snake_case). + 4. Pipe codes in branches must reference existing pipes (snake_case). 
""" type: Literal["PipeParallel"] = "PipeParallel" pipe_category: Literal["PipeController"] = "PipeController" - parallels: list[SubPipeSpec] = Field(description="List of SubPipeSpec instances to execute concurrently.") + branches: list[SubPipeSpec] = Field(description="List of SubPipeSpec instances to execute concurrently.") add_each_output: bool = Field(description="Whether to include individual pipe outputs in the combined result.") combined_output: str | None = Field( default=None, @@ -74,7 +74,7 @@ def rendered_pretty(self, title: str | None = None, depth: int = 0) -> PrettyPri # Add parallel branches as a table parallel_group.renderables.append(Text()) # Blank line - parallels_table = Table( + branches_table = Table( title="Parallel Branches:", title_justify="left", title_style="not italic", @@ -84,28 +84,28 @@ def rendered_pretty(self, title: str | None = None, depth: int = 0) -> PrettyPri show_lines=True, border_style="dim", ) - parallels_table.add_column("Branch", style="dim", width=6, justify="right") - parallels_table.add_column("Pipe", style="red") - parallels_table.add_column("Result name", style="cyan") + branches_table.add_column("Branch", style="dim", width=6, justify="right") + branches_table.add_column("Pipe", style="red") + branches_table.add_column("Result name", style="cyan") - for idx, parallel in enumerate(self.parallels, start=1): - parallels_table.add_row(str(idx), parallel.pipe_code, parallel.result) + for idx, branch in enumerate(self.branches, start=1): + branches_table.add_row(str(idx), branch.pipe_code, branch.result) - parallel_group.renderables.append(parallels_table) + parallel_group.renderables.append(branches_table) return parallel_group @override def to_blueprint(self) -> PipeParallelBlueprint: base_blueprint = super().to_blueprint() - core_parallels = [parallel.to_blueprint() for parallel in self.parallels] + core_branches = [branch.to_blueprint() for branch in self.branches] return PipeParallelBlueprint( description=base_blueprint.description, inputs=base_blueprint.inputs, output=base_blueprint.output, type=self.type, pipe_category=self.pipe_category, - parallels=core_parallels, + branches=core_branches, add_each_output=self.add_each_output, combined_output=self.combined_output, ) diff --git a/pipelex/builder/pipe/pipe_sequence_spec.py b/pipelex/builder/pipe/pipe_sequence_spec.py index a75187be1..7bf19d980 100644 --- a/pipelex/builder/pipe/pipe_sequence_spec.py +++ b/pipelex/builder/pipe/pipe_sequence_spec.py @@ -16,7 +16,7 @@ class PipeSequenceSpec(PipeSpec): """PipeSequenceSpec orchestrates the execution of multiple pipes in a defined order, where each pipe's output can be used as input for subsequent pipes. This enables - building complex data processing workflows with step-by-step transformations. + building powerful methods with step-by-step transformations. 
""" type: SkipJsonSchema[Literal["PipeSequence"]] = "PipeSequence" diff --git a/pipelex/builder/runner_code.py b/pipelex/builder/runner_code.py index 2e27fbfe9..023fe80c2 100644 --- a/pipelex/builder/runner_code.py +++ b/pipelex/builder/runner_code.py @@ -165,7 +165,7 @@ def generate_runner_code(pipe: PipeAbstract, output_multiplicity: bool = False, Args: pipe: The pipe to generate runner code for output_multiplicity: Whether the output is a list (e.g., Text[]) - library_dir: Directory containing the PLX bundles to load + library_dir: Directory containing the MTHDS bundles to load """ # Get output information structure_class_name = pipe.output.concept.structure_class_name @@ -230,7 +230,7 @@ def generate_runner_code(pipe: PipeAbstract, output_multiplicity: bool = False, [ "", "from pipelex.pipelex import Pipelex", - "from pipelex.pipeline.execute import execute_pipeline", + "from pipelex.pipeline.runner import PipelexRunner", ] ) @@ -266,7 +266,8 @@ def generate_runner_code(pipe: PipeAbstract, output_multiplicity: bool = False, "", "", f"async def run_{pipe.code}() -> {return_type}:", - " pipe_output = await execute_pipeline(", + " runner = PipelexRunner()", + " response = await runner.execute_pipeline(", f' pipe_code="{pipe.code}",', ] @@ -282,6 +283,7 @@ def generate_runner_code(pipe: PipeAbstract, output_multiplicity: bool = False, function_lines.extend( [ " )", + " pipe_output = response.pipe_output", f" return {result_call}", "", "", diff --git a/pipelex/builder/synthetic_inputs/synthesize_image.plx b/pipelex/builder/synthetic_inputs/synthesize_image.mthds similarity index 60% rename from pipelex/builder/synthetic_inputs/synthesize_image.plx rename to pipelex/builder/synthetic_inputs/synthesize_image.mthds index d6890eb88..310d2c98c 100644 --- a/pipelex/builder/synthetic_inputs/synthesize_image.plx +++ b/pipelex/builder/synthetic_inputs/synthesize_image.mthds @@ -1,6 +1,6 @@ -domain = "synthetic_data" +domain = "synthetic_data" description = "Generate synthetic test images based on category and optional description. Supports photograph, screenshot, chart, diagram, document_scan, and handwritten categories." 
-main_pipe = "synthesize_image" +main_pipe = "synthesize_image" # ============================================================================ # CONCEPTS @@ -10,7 +10,14 @@ main_pipe = "synthesize_image" description = "Request for synthetic image generation" [concept.ImageRequest.structure] -category = {choices = ["photograph", "screenshot", "chart", "diagram", "document_scan", "handwritten"], description = "Image category", required = true} +category = { choices = [ + "photograph", + "screenshot", + "chart", + "diagram", + "document_scan", + "handwritten", +], description = "Image category", required = true } description = "Optional description of the image to generate" # ============================================================================ @@ -21,11 +28,11 @@ description = "Optional description of the image to generate" [pipe.synthesize_image] type = "PipeSequence" description = "Generate synthetic image: create prompt then render with category-specific model" -inputs = {request = "ImageRequest"} +inputs = { request = "ImageRequest" } output = "Image" steps = [ - {pipe = "create_image_prompt", result = "img_prompt"}, - {pipe = "route_rendering", result = "image"} + { pipe = "create_image_prompt", result = "img_prompt" }, + { pipe = "route_rendering", result = "image" }, ] # ---------------------------------------------------------------------------- @@ -35,7 +42,7 @@ steps = [ [pipe.create_image_prompt] type = "PipeLLM" description = "Create an image generation prompt based on category and description" -inputs = {request = "ImageRequest"} +inputs = { request = "ImageRequest" } output = "ImgGenPrompt" model = "$pipe-builder-img-gen-prompting" prompt = """ @@ -53,34 +60,34 @@ If the user description is empty or minimal, imagine something reasonable based # ---------------------------------------------------------------------------- [pipe.route_rendering] -type = "PipeCondition" -description = "Route to appropriate image generation model based on category" -inputs = {request = "ImageRequest", img_prompt = "ImgGenPrompt"} -output = "Image" -expression = "request.category" -outcomes = {photograph = "render_photo", document_scan = "render_ui", handwritten = "render_photo", screenshot = "render_ui", diagram = "render_ui", chart = "render_chart"} +type = "PipeCondition" +description = "Route to appropriate image generation model based on category" +inputs = { request = "ImageRequest", img_prompt = "ImgGenPrompt" } +output = "Image" +expression = "request.category" +outcomes = { photograph = "render_photo", document_scan = "render_ui", handwritten = "render_photo", screenshot = "render_ui", diagram = "render_ui", chart = "render_chart" } default_outcome = "render_photo" [pipe.render_photo] -type = "PipeImgGen" +type = "PipeImgGen" description = "Render photorealistic image" -inputs = {img_prompt = "ImgGenPrompt"} -output = "Image" -prompt = "$img_prompt" -model = "$synthesize-photo" +inputs = { img_prompt = "ImgGenPrompt" } +output = "Image" +prompt = "$img_prompt" +model = "$synthesize-photo" [pipe.render_ui] -type = "PipeImgGen" +type = "PipeImgGen" description = "Render UI/diagram image" -inputs = {img_prompt = "ImgGenPrompt"} -output = "Image" -prompt = "$img_prompt" -model = "$synthesize-ui" +inputs = { img_prompt = "ImgGenPrompt" } +output = "Image" +prompt = "$img_prompt" +model = "$synthesize-ui" [pipe.render_chart] -type = "PipeImgGen" +type = "PipeImgGen" description = "Render chart image" -inputs = {img_prompt = "ImgGenPrompt"} -output = "Image" -prompt = "$img_prompt" -model = 
"$synthesize-chart" +inputs = { img_prompt = "ImgGenPrompt" } +output = "Image" +prompt = "$img_prompt" +model = "$synthesize-chart" diff --git a/pipelex/cli/_cli.py b/pipelex/cli/_cli.py index 0a12c394f..c790ba120 100644 --- a/pipelex/cli/_cli.py +++ b/pipelex/cli/_cli.py @@ -11,6 +11,7 @@ from pipelex.cli.commands.graph_cmd import graph_app from pipelex.cli.commands.init.command import init_cmd from pipelex.cli.commands.init.ui.types import InitFocus +from pipelex.cli.commands.pkg.app import pkg_app from pipelex.cli.commands.run_cmd import run_cmd from pipelex.cli.commands.show_cmd import show_app from pipelex.cli.commands.validate_cmd import validate_cmd @@ -26,7 +27,7 @@ class PipelexCLI(TyperGroup): @override def list_commands(self, ctx: Context) -> list[str]: # List the commands in the proper order because natural ordering doesn't work between Typer groups and commands - return ["init", "doctor", "build", "validate", "run", "graph", "show", "which"] + return ["init", "doctor", "build", "validate", "run", "graph", "show", "which", "pkg"] @override def get_command(self, ctx: Context, cmd_name: str) -> Command | None: @@ -122,16 +123,22 @@ def app_callback( check_readiness() -@app.command(name="init", help="Initialize Pipelex configuration in a `.pipelex` directory") +@app.command(name="init", help="Initialize Pipelex configuration in ~/.pipelex (global) or project .pipelex (--local)") def init_command( focus: Annotated[InitFocus, typer.Argument(help="What to initialize: 'config', 'telemetry', or 'all'")] = InitFocus.ALL, + local: Annotated[ + bool, typer.Option("--local", "-l", help="Create project-level .pipelex/ at the detected project root instead of global ~/.pipelex/") + ] = False, ) -> None: """Initialize Pipelex configuration and telemetry. + By default, creates global configuration in ~/.pipelex/. + Use --local to create project-level overrides in {project_root}/.pipelex/. + Note: Config updates are not yet supported. This command always performs a full reset of the configuration. 
""" - init_cmd(focus=focus) + init_cmd(focus=focus, local=local) @app.command(name="doctor", help="Check Pipelex configuration health and suggest fixes") @@ -143,12 +150,13 @@ def doctor_command( app.add_typer( - build_app, name="build", help="Generate AI workflows from natural language requirements: pipelines in .plx format and python code to run them" + build_app, name="build", help="Generate AI methods from natural language requirements: pipelines in .mthds format and python code to run them" ) app.command(name="validate", help="Validate pipes: static validation for syntax and dependencies, dry-run execution for logic and consistency")( validate_cmd ) -app.command(name="run", help="Run a pipe, optionally providing a specific bundle file (.plx)")(run_cmd) +app.command(name="run", help="Run a pipe, optionally providing a specific bundle file (.mthds)")(run_cmd) app.add_typer(graph_app, name="graph", help="Generate and render execution graphs") app.add_typer(show_app, name="show", help="Show configuration, pipes, and list AI models") app.command(name="which", help="Locate where a pipe is defined, similar to 'which' for executables")(which_cmd) +app.add_typer(pkg_app, name="pkg", help="Package management: initialize and inspect METHODS.toml manifests") diff --git a/pipelex/cli/agent_cli/CLAUDE.md b/pipelex/cli/agent_cli/CLAUDE.md index 9c579f367..f6e0b33ea 100644 --- a/pipelex/cli/agent_cli/CLAUDE.md +++ b/pipelex/cli/agent_cli/CLAUDE.md @@ -1,6 +1,6 @@ # Agent CLI (`pipelex-agent`) -Machine-first CLI for building, running, and validating Pipelex workflow bundles (`.plx` files). All output is structured JSON to stdout (success) or stderr (error). No Rich formatting, no interactive prompts. +Machine-first CLI for building, running, and validating Pipelex method bundles (`.mthds` files). All output is structured JSON to stdout (success) or stderr (error). No Rich formatting, no interactive prompts. 
## Companion: Agent Skills @@ -28,7 +28,7 @@ commands/ inputs_cmd.py # inputs β€” generate example input JSON concept_cmd.py # concept β€” JSON spec β†’ concept TOML pipe_cmd.py # pipe β€” JSON spec β†’ pipe TOML - assemble_cmd.py # assemble β€” combine TOML parts into .plx + assemble_cmd.py # assemble β€” combine TOML parts into .mthds graph_cmd.py # graph β€” render execution graph HTML models_cmd.py # models β€” list presets, aliases, talent mappings doctor_cmd.py # doctor β€” config health check @@ -38,14 +38,14 @@ commands/ | Command | Does | |---------|------| -| `build` | Runs BuilderLoop to generate a `.plx` from a natural language prompt | +| `build` | Runs BuilderLoop to generate a `.mthds` from a natural language prompt | | `run` | Executes a pipeline, returns JSON with main_stuff + working_memory | | `validate` | Dry-runs pipes/bundles, returns validation status per pipe | | `inputs` | Generates example input JSON for a given pipe | | `concept` | Converts a JSON concept spec into TOML | | `pipe` | Converts a JSON pipe spec (typed) into TOML | -| `assemble` | Merges concept + pipe TOML sections into a complete `.plx` file | -| `graph` | Generates graph visualization (HTML) from a .plx bundle via dry-run | +| `assemble` | Merges concept + pipe TOML sections into a complete `.mthds` file | +| `graph` | Generates graph visualization (HTML) from a .mthds bundle via dry-run | | `models` | Lists available model presets, aliases, waterfalls, and talent mappings | | `doctor` | Checks config, credentials, models health | diff --git a/pipelex/cli/agent_cli/_agent_cli.py b/pipelex/cli/agent_cli/_agent_cli.py index 239275b84..7380cce41 100644 --- a/pipelex/cli/agent_cli/_agent_cli.py +++ b/pipelex/cli/agent_cli/_agent_cli.py @@ -105,7 +105,7 @@ def run_command( ] = None, bundle: Annotated[ str | None, - typer.Option("--bundle", help="Bundle file path (.plx)"), + typer.Option("--bundle", help="Bundle file path (.mthds)"), ] = None, inputs: Annotated[ str | None, @@ -125,7 +125,7 @@ def run_command( ] = False, library_dir: Annotated[ list[str] | None, - typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.plx files)"), + typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.mthds files)"), ] = None, ) -> None: """Execute a pipeline and output JSON results.""" @@ -153,7 +153,7 @@ def validate_command( ] = None, bundle: Annotated[ str | None, - typer.Option("--bundle", help="Bundle file path (.plx)"), + typer.Option("--bundle", help="Bundle file path (.mthds)"), ] = None, validate_all: Annotated[ bool, @@ -161,7 +161,7 @@ def validate_command( ] = False, library_dir: Annotated[ list[str] | None, - typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.plx files)"), + typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.mthds files)"), ] = None, ) -> None: """Validate a pipe, bundle, or all pipes and output JSON results.""" @@ -186,7 +186,7 @@ def inputs_command( ] = None, library_dir: Annotated[ list[str] | None, - typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.plx files)"), + typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.mthds files)"), ] = None, ) -> None: """Generate example input JSON for a pipe.""" @@ -231,7 +231,7 @@ def pipe_command( pipe_cmd(pipe_type=pipe_type, spec=spec, spec_file=spec_file) -@app.command(name="assemble", help="Assemble a complete .plx bundle from TOML parts") 
+@app.command(name="assemble", help="Assemble a complete .mthds bundle from TOML parts") def assemble_command( domain: Annotated[ str, @@ -243,7 +243,7 @@ def assemble_command( ], output: Annotated[ str, - typer.Option("--output", "-o", help="Output file path for the assembled bundle (.plx)"), + typer.Option("--output", "-o", help="Output file path for the assembled bundle (.mthds)"), ], description: Annotated[ str | None, @@ -262,7 +262,7 @@ def assemble_command( typer.Option("--pipes", "-p", help="TOML file(s) or inline TOML containing pipe definitions"), ] = None, ) -> None: - """Assemble a complete .plx bundle from individual TOML parts.""" + """Assemble a complete .mthds bundle from individual TOML parts.""" assemble_cmd( domain=domain, main_pipe=main_pipe, @@ -274,11 +274,11 @@ def assemble_command( ) -@app.command(name="graph", help="Generate graph visualization from a .plx bundle") +@app.command(name="graph", help="Generate graph visualization from a .mthds bundle") def graph_command( target: Annotated[ str, - typer.Argument(help="Path to a .plx bundle file"), + typer.Argument(help="Path to a .mthds bundle file"), ], graph_format: Annotated[ GraphFormat, @@ -286,10 +286,10 @@ def graph_command( ] = GraphFormat.REACTFLOW, library_dir: Annotated[ list[str] | None, - typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.plx files)"), + typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.mthds files)"), ] = None, ) -> None: - """Generate graph visualization from a .plx bundle.""" + """Generate graph visualization from a .mthds bundle.""" graph_cmd(target=target, graph_format=graph_format, library_dir=library_dir) diff --git a/pipelex/cli/agent_cli/commands/agent_output.py b/pipelex/cli/agent_cli/commands/agent_output.py index 8ddf5846e..8cbddf0ca 100644 --- a/pipelex/cli/agent_cli/commands/agent_output.py +++ b/pipelex/cli/agent_cli/commands/agent_output.py @@ -27,8 +27,8 @@ "ArgumentError": "Check command usage with 'pipelex-agent --help'", "JSONDecodeError": "Verify the JSON input is valid (check for trailing commas, unquoted keys, etc.)", # Interpreter errors - "PipelexInterpreterError": "Check PLX file TOML syntax and ensure all referenced concepts and pipes are defined", - "PLXDecodeError": "The PLX file has TOML syntax errors; validate TOML syntax before retrying", + "PipelexInterpreterError": "Check MTHDS file TOML syntax and ensure all referenced concepts and pipes are defined", + "MthdsDecodeError": "The MTHDS file has TOML syntax errors; validate TOML syntax before retrying", # Configuration/initialization errors "TelemetryConfigValidationError": "Run 'pipelex init telemetry' to create a valid telemetry configuration", "GatewayTermsNotAcceptedError": "Run 'pipelex init config' to accept gateway terms, or disable pipelex_gateway in backends.toml", @@ -58,14 +58,14 @@ } AGENT_ERROR_DOMAINS: dict[str, str] = { - # input = agent can fix (bad .plx, wrong args, bad JSON) + # input = agent can fix (bad .mthds, wrong args, bad JSON) "ValidateBundleError": "input", "PipeValidationError": "input", "FileNotFoundError": "input", "JSONDecodeError": "input", "JsonTypeError": "input", "ArgumentError": "input", - "PLXDecodeError": "input", + "MthdsDecodeError": "input", "PipelexInterpreterError": "input", "ValidationError": "input", "ValueError": "input", diff --git a/pipelex/cli/agent_cli/commands/assemble_cmd.py b/pipelex/cli/agent_cli/commands/assemble_cmd.py index da7c19257..8adc64297 100644 --- 
a/pipelex/cli/agent_cli/commands/assemble_cmd.py +++ b/pipelex/cli/agent_cli/commands/assemble_cmd.py @@ -69,7 +69,7 @@ def assemble_cmd( ], output: Annotated[ str, - typer.Option("--output", "-o", help="Output file path for the assembled bundle (.plx)"), + typer.Option("--output", "-o", help="Output file path for the assembled bundle (.mthds)"), ], description: Annotated[ str | None, @@ -88,7 +88,7 @@ def assemble_cmd( typer.Option("--pipes", "-p", help="TOML file(s) or inline TOML containing pipe definitions"), ] = None, ) -> None: - """Assemble a complete .plx bundle from individual TOML parts. + """Assemble a complete .mthds bundle from individual TOML parts. Combines domain configuration, concepts, and pipes into a single valid Pipelex bundle file. Each --concepts and --pipes argument can be either @@ -98,11 +98,11 @@ def assemble_cmd( Examples: pipelex-agent assemble --domain my_domain --main-pipe main - --concepts concepts.toml --pipes pipes.toml --output bundle.plx + --concepts concepts.toml --pipes pipes.toml --output bundle.mthds pipelex-agent assemble --domain my_domain --main-pipe main --concepts '[concept.MyInput]' --pipes '[pipe.main]' - --output bundle.plx + --output bundle.mthds """ try: # Create base document with domain header diff --git a/pipelex/cli/agent_cli/commands/build_cmd.py b/pipelex/cli/agent_cli/commands/build_cmd.py index 55ba93d56..c1aea35b7 100644 --- a/pipelex/cli/agent_cli/commands/build_cmd.py +++ b/pipelex/cli/agent_cli/commands/build_cmd.py @@ -40,7 +40,7 @@ def build_cmd( """Build a pipeline from a prompt and output JSON with paths. Outputs to pipelex-wip/ directory with incremental naming (pipeline_01, pipeline_02, etc.). - Generates PLX bundle only (no inputs.json or runner.py). + Generates MTHDS bundle only (no inputs.json or runner.py). Outputs JSON to stdout on success, JSON to stderr on error with exit code 1. 
""" diff --git a/pipelex/cli/agent_cli/commands/build_core.py b/pipelex/cli/agent_cli/commands/build_core.py index 0330ef4af..9952aac52 100644 --- a/pipelex/cli/agent_cli/commands/build_core.py +++ b/pipelex/cli/agent_cli/commands/build_core.py @@ -7,12 +7,13 @@ from pipelex import log from pipelex.builder.builder_errors import PipeBuilderError -from pipelex.builder.builder_loop import BuilderLoop +from pipelex.builder.builder_loop import BuilderLoop, maybe_generate_manifest_for_output from pipelex.builder.conventions import DEFAULT_INPUTS_FILE_NAME from pipelex.builder.exceptions import PipelexBundleSpecBlueprintError from pipelex.config import get_config +from pipelex.core.interpreter.helpers import MTHDS_EXTENSION from pipelex.hub import get_required_pipe -from pipelex.language.plx_factory import PlxFactory +from pipelex.language.mthds_factory import MthdsFactory from pipelex.system.configuration.configs import PipelineExecutionConfig from pipelex.tools.misc.file_utils import ( ensure_directory_for_file_path, @@ -27,7 +28,7 @@ class BuildPipeResult(BaseModel): """Result of building a pipe, containing output paths and metadata.""" output_dir: Path - plx_file: Path + mthds_file: Path inputs_file: Path | None = None main_pipe_code: str domain: str @@ -44,7 +45,7 @@ def to_agent_json(self) -> dict[str, Any]: """ result: dict[str, Any] = { "output_dir": str(self.output_dir), - "plx_file": str(self.plx_file), + "mthds_file": str(self.mthds_file), "main_pipe_code": self.main_pipe_code, "domain": self.domain, } @@ -128,24 +129,29 @@ async def build_pipe_core( # Determine base output directory base_dir = output_dir or builder_config.default_output_dir - # Determine output path - always generate directory with bundle.plx + # Determine output path - always generate directory with bundle.mthds dir_name = output_name or builder_config.default_directory_base_name - bundle_file_name = Path(f"{builder_config.default_bundle_file_name}.plx") + bundle_file_name = Path(f"{builder_config.default_bundle_file_name}{MTHDS_EXTENSION}") extras_output_dir = get_incremental_directory_path( base_path=base_dir, base_name=dir_name, ) - plx_file_path = Path(extras_output_dir) / bundle_file_name + mthds_file_path = Path(extras_output_dir) / bundle_file_name - # Save the PLX file - ensure_directory_for_file_path(file_path=str(plx_file_path)) + # Save the MTHDS file + ensure_directory_for_file_path(file_path=str(mthds_file_path)) try: - plx_content = PlxFactory.make_plx_content(blueprint=pipelex_bundle_spec.to_blueprint()) + mthds_content = MthdsFactory.make_mthds_content(blueprint=pipelex_bundle_spec.to_blueprint()) except PipelexBundleSpecBlueprintError as exc: msg = f"Failed to convert bundle spec to blueprint: {exc}" raise BuildPipeError(message=msg) from exc - save_text_to_path(text=plx_content, path=str(plx_file_path)) + save_text_to_path(text=mthds_content, path=str(mthds_file_path)) + + # Generate METHODS.toml if multiple domains exist in output dir + manifest_path = maybe_generate_manifest_for_output(output_dir=Path(extras_output_dir)) + if manifest_path: + log.verbose(f"Package manifest generated: {manifest_path}") main_pipe_code = pipelex_bundle_spec.main_pipe or "" domain = pipelex_bundle_spec.domain or "" @@ -170,7 +176,7 @@ async def build_pipe_core( return BuildPipeResult( output_dir=Path(extras_output_dir), - plx_file=plx_file_path, + mthds_file=mthds_file_path, inputs_file=inputs_file_path, main_pipe_code=main_pipe_code, domain=domain, diff --git a/pipelex/cli/agent_cli/commands/graph_cmd.py 
b/pipelex/cli/agent_cli/commands/graph_cmd.py index 32ce593a2..dc46ca2fe 100644 --- a/pipelex/cli/agent_cli/commands/graph_cmd.py +++ b/pipelex/cli/agent_cli/commands/graph_cmd.py @@ -1,4 +1,4 @@ -"""Agent CLI graph command - generate graph HTML from a .plx bundle via dry-run.""" +"""Agent CLI graph command - generate graph HTML from a .mthds bundle via dry-run.""" import asyncio from pathlib import Path @@ -9,7 +9,7 @@ from pipelex.cli.agent_cli.commands.agent_cli_factory import make_pipelex_for_agent_cli from pipelex.cli.agent_cli.commands.agent_output import agent_error, agent_success from pipelex.config import get_config -from pipelex.core.interpreter.exceptions import PipelexInterpreterError, PLXDecodeError +from pipelex.core.interpreter.exceptions import MthdsDecodeError, PipelexInterpreterError from pipelex.core.interpreter.helpers import is_pipelex_file from pipelex.core.interpreter.interpreter import PipelexInterpreter from pipelex.core.pipes.exceptions import PipeOperatorModelChoiceError @@ -18,7 +18,7 @@ from pipelex.pipe_run.pipe_run_mode import PipeRunMode from pipelex.pipelex import Pipelex from pipelex.pipeline.exceptions import PipelineExecutionError -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner from pipelex.types import StrEnum @@ -33,7 +33,7 @@ class GraphFormat(StrEnum): def graph_cmd( target: Annotated[ str, - typer.Argument(help="Path to a .plx bundle file"), + typer.Argument(help="Path to a .mthds bundle file"), ], graph_format: Annotated[ GraphFormat, @@ -41,10 +41,10 @@ def graph_cmd( ] = GraphFormat.REACTFLOW, library_dir: Annotated[ list[str] | None, - typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.plx files)"), + typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.mthds files)"), ] = None, ) -> None: - """Generate graph visualization from a .plx bundle. + """Generate graph visualization from a .mthds bundle. Performs a dry-run of the pipeline with mock inputs to produce the execution graph, then renders it as HTML. @@ -52,9 +52,9 @@ def graph_cmd( Outputs JSON to stdout on success, JSON to stderr on error with exit code 1. 
Examples: - pipelex-agent graph bundle.plx - pipelex-agent graph bundle.plx --format mermaidflow - pipelex-agent graph bundle.plx -L ./my_pipes/ + pipelex-agent graph bundle.mthds + pipelex-agent graph bundle.mthds --format mermaidflow + pipelex-agent graph bundle.mthds -L ./my_pipes/ """ input_path = Path(target) @@ -62,12 +62,12 @@ def graph_cmd( agent_error(f"File not found: {target}", "FileNotFoundError") if not is_pipelex_file(input_path): - agent_error(f"Expected a .plx bundle file, got: {input_path.name}", "ArgumentError") + agent_error(f"Expected a .mthds bundle file, got: {input_path.name}", "ArgumentError") - # Read PLX content and extract main pipe + # Read MTHDS content and extract main pipe try: - plx_content = input_path.read_text(encoding="utf-8") - bundle_blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(plx_content=plx_content) + mthds_content = input_path.read_text(encoding="utf-8") + bundle_blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(mthds_content=mthds_content) main_pipe_code = bundle_blueprint.main_pipe if not main_pipe_code: agent_error( @@ -77,7 +77,7 @@ def graph_cmd( pipe_code: str = main_pipe_code except (OSError, UnicodeDecodeError) as exc: agent_error(f"Failed to read bundle file '{target}': {exc}", type(exc).__name__, cause=exc) - except (PipelexInterpreterError, PLXDecodeError) as exc: + except (PipelexInterpreterError, MthdsDecodeError) as exc: agent_error(f"Failed to parse bundle '{target}': {exc}", type(exc).__name__, cause=exc) # Initialize Pipelex @@ -90,16 +90,19 @@ def graph_cmd( mock_inputs=True, ) - pipe_output = asyncio.run( - execute_pipeline( + runner = PipelexRunner( + bundle_uri=target, + pipe_run_mode=PipeRunMode.DRY, + execution_config=execution_config, + library_dirs=library_dir, + ) + response = asyncio.run( + runner.execute_pipeline( pipe_code=pipe_code, - plx_content=plx_content, - bundle_uri=target, - pipe_run_mode=PipeRunMode.DRY, - execution_config=execution_config, - library_dirs=library_dir, + mthds_content=mthds_content, ) ) + pipe_output = response.pipe_output if not pipe_output.graph_spec: agent_error("Pipeline execution did not produce a graph spec", "GraphSpecMissingError") diff --git a/pipelex/cli/agent_cli/commands/inputs_cmd.py b/pipelex/cli/agent_cli/commands/inputs_cmd.py index ea4cdc4b7..51ee144b2 100644 --- a/pipelex/cli/agent_cli/commands/inputs_cmd.py +++ b/pipelex/cli/agent_cli/commands/inputs_cmd.py @@ -33,7 +33,7 @@ async def _inputs_core( Args: pipe_code: The pipe code to generate inputs for. - bundle_path: Path to the bundle file (.plx). + bundle_path: Path to the bundle file (.mthds). library_dirs: List of library directories to search for pipe definitions. Returns: @@ -44,7 +44,7 @@ async def _inputs_core( NoInputsRequiredError: If the pipe has no inputs. """ if bundle_path: - validate_bundle_result = await validate_bundle(plx_file_path=bundle_path, library_dirs=library_dirs) + validate_bundle_result = await validate_bundle(mthds_file_path=bundle_path, library_dirs=library_dirs) bundle_blueprint = validate_bundle_result.blueprints[0] if not pipe_code: main_pipe_code = bundle_blueprint.main_pipe @@ -87,7 +87,7 @@ def inputs_cmd( ] = None, library_dir: Annotated[ list[str] | None, - typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.plx files)"), + typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.mthds files)"), ] = None, ) -> None: """Generate example input JSON for a pipe and output JSON results. 
diff --git a/pipelex/cli/agent_cli/commands/pipe_cmd.py b/pipelex/cli/agent_cli/commands/pipe_cmd.py
index d41276195..b641b8868 100644
--- a/pipelex/cli/agent_cli/commands/pipe_cmd.py
+++ b/pipelex/cli/agent_cli/commands/pipe_cmd.py
@@ -129,13 +129,13 @@ def _add_type_specific_fields(pipe_spec: PipeSpec, pipe_table: tomlkit.TOMLDocum
         pipe_table.add("add_each_output", pipe_spec.add_each_output)
         if pipe_spec.combined_output:
             pipe_table.add("combined_output", pipe_spec.combined_output)
-        parallels_array = tomlkit.array()
-        for parallel in pipe_spec.parallels:
-            parallel_inline = tomlkit.inline_table()
-            parallel_inline.append("pipe", parallel.pipe_code)
-            parallel_inline.append("result", parallel.result)
-            parallels_array.append(parallel_inline)
-        pipe_table.add("parallels", parallels_array)
+        branches_array = tomlkit.array()
+        for branch in pipe_spec.branches:
+            branch_inline = tomlkit.inline_table()
+            branch_inline.append("pipe", branch.pipe_code)
+            branch_inline.append("result", branch.result)
+            branches_array.append(branch_inline)
+        pipe_table.add("branches", branches_array)
     elif isinstance(pipe_spec, PipeConditionSpec):
         pipe_table.add("expression", pipe_spec.jinja2_expression_template)
@@ -189,7 +189,7 @@ def _parse_pipe_spec_from_json(pipe_type: str, spec_data: dict[str, Any]) -> Pip
     # Add type to spec_data if not present
     spec_data["type"] = pipe_type

-    # Handle steps/parallels conversion - need to convert pipe to pipe_code
+    # Handle steps/branches conversion - need to convert pipe to pipe_code
     if "steps" in spec_data:
         converted_steps = []
         for step in spec_data["steps"]:
@@ -198,13 +198,13 @@
             converted_steps.append(step)
         spec_data["steps"] = converted_steps

-    if "parallels" in spec_data:
-        converted_parallels = []
-        for parallel in spec_data["parallels"]:
-            if "pipe" in parallel and "pipe_code" not in parallel:
-                parallel["pipe_code"] = parallel.pop("pipe")
-            converted_parallels.append(parallel)
-        spec_data["parallels"] = converted_parallels
+    if "branches" in spec_data:
+        converted_branches = []
+        for branch in spec_data["branches"]:
+            if "pipe" in branch and "pipe_code" not in branch:
+                branch["pipe_code"] = branch.pop("pipe")
+            converted_branches.append(branch)
+        spec_data["branches"] = converted_branches

     # Handle expression -> jinja2_expression_template for PipeCondition
     if pipe_type == "PipeCondition" and "expression" in spec_data:
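
# Editor's note: a minimal runnable sketch of the tomlkit serialization pattern
# in _add_type_specific_fields above, after the parallels -> branches rename.
# The (pipe_code, result) pairs stand in for pipe_spec.branches entries.
import tomlkit

doc = tomlkit.document()
branches_array = tomlkit.array()
for pipe_code, result in [("extract_title", "title"), ("extract_body", "body")]:
    branch_inline = tomlkit.inline_table()
    branch_inline.append("pipe", pipe_code)
    branch_inline.append("result", result)
    branches_array.append(branch_inline)
doc.add("branches", branches_array)
# Prints: branches = [{pipe = "extract_title", result = "title"}, {pipe = "extract_body", result = "body"}]
print(tomlkit.dumps(doc))
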
diff --git a/pipelex/cli/agent_cli/commands/run_cmd.py b/pipelex/cli/agent_cli/commands/run_cmd.py
index 36f46147a..0b2990997 100644
--- a/pipelex/cli/agent_cli/commands/run_cmd.py
+++ b/pipelex/cli/agent_cli/commands/run_cmd.py
@@ -10,7 +10,7 @@
 from pipelex.cli.agent_cli.commands.agent_cli_factory import make_pipelex_for_agent_cli
 from pipelex.cli.agent_cli.commands.agent_output import agent_error, agent_success
 from pipelex.config import get_config
-from pipelex.core.interpreter.exceptions import PipelexInterpreterError, PLXDecodeError
+from pipelex.core.interpreter.exceptions import MthdsDecodeError, PipelexInterpreterError
 from pipelex.core.interpreter.helpers import is_pipelex_file
 from pipelex.core.interpreter.interpreter import PipelexInterpreter
 from pipelex.core.pipes.exceptions import PipeOperatorModelChoiceError
@@ -19,13 +19,13 @@
 from pipelex.pipe_run.pipe_run_mode import PipeRunMode
 from pipelex.pipelex import Pipelex
 from pipelex.pipeline.exceptions import PipelineExecutionError
-from pipelex.pipeline.execute import execute_pipeline
+from pipelex.pipeline.runner import PipelexRunner
 from pipelex.tools.misc.json_utils import JsonTypeError, load_json_dict_from_path


 async def _run_pipeline_core(
     pipe_code: str,
-    plx_content: str | None = None,
+    mthds_content: str | None = None,
     bundle_uri: str | None = None,
     inputs: dict[str, Any] | None = None,
     dry_run: bool = False,
@@ -37,7 +37,7 @@

     Args:
         pipe_code: The pipe code to run.
-        plx_content: PLX content string (optional).
+        mthds_content: MTHDS content string (optional).
         bundle_uri: Bundle file path (optional).
         inputs: Input dictionary for the pipeline.
         dry_run: Whether to run in dry mode (no actual inference).
@@ -58,15 +58,18 @@
         mock_inputs=mock_inputs or None,
     )

-    pipe_output = await execute_pipeline(
-        pipe_code=pipe_code,
-        plx_content=plx_content,
+    runner = PipelexRunner(
         bundle_uri=bundle_uri,
-        inputs=inputs,
         pipe_run_mode=pipe_run_mode,
         execution_config=execution_config,
         library_dirs=library_dirs,
     )
+    response = await runner.execute_pipeline(
+        pipe_code=pipe_code,
+        mthds_content=mthds_content,
+        inputs=inputs,
+    )
+    pipe_output = response.pipe_output

     main_stuff = pipe_output.working_memory.get_optional_main_stuff()
     main_stuff_json: dict[str, Any] = {}
@@ -138,7 +141,7 @@ def run_cmd(
     ] = None,
     bundle: Annotated[
         str | None,
-        typer.Option("--bundle", help="Bundle file path (.plx)"),
+        typer.Option("--bundle", help="Bundle file path (.mthds)"),
     ] = None,
     inputs: Annotated[
         str | None,
@@ -158,7 +161,7 @@
     ] = False,
     library_dir: Annotated[
         list[str] | None,
-        typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.plx files)"),
+        typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.mthds files)"),
     ] = None,
 ) -> None:
     """Execute a pipeline and output JSON results.
@@ -167,9 +170,9 @@

     Examples:
         pipelex-agent run my_pipe --inputs data.json
-        pipelex-agent run my_bundle.plx --pipe my_pipe
+        pipelex-agent run my_bundle.mthds --pipe my_pipe
         pipelex-agent run my_pipe --dry-run --mock-inputs
-        pipelex-agent run my_bundle.plx --graph
+        pipelex-agent run my_bundle.mthds --graph
     """
     # Validate that at least one target is provided
     provided_options = sum([target is not None, pipe is not None, bundle is not None])
@@ -203,13 +206,13 @@
     if not pipe_code and not bundle_path:
         agent_error("No pipe code or bundle file specified", "ArgumentError")

-    # Load plx content from bundle if provided
-    plx_content: str | None = None
+    # Load MTHDS content from bundle if provided
+    mthds_content: str | None = None
     if bundle_path:
         try:
-            plx_content = Path(bundle_path).read_text(encoding="utf-8")
+            mthds_content = Path(bundle_path).read_text(encoding="utf-8")
             if not pipe_code:
-                bundle_blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(plx_content=plx_content)
+                bundle_blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(mthds_content=mthds_content)
                 main_pipe_code = bundle_blueprint.main_pipe
                 if not main_pipe_code:
                     agent_error(
@@ -221,7 +224,7 @@
             agent_error(f"Bundle file not found: {bundle_path}", "FileNotFoundError", cause=exc)
         except (OSError, UnicodeDecodeError) as exc:
             agent_error(f"Failed to read bundle file '{bundle_path}': {exc}", type(exc).__name__, cause=exc)
-        except (PipelexInterpreterError, PLXDecodeError) as exc:
+        except (PipelexInterpreterError, MthdsDecodeError) as exc:
             agent_error(f"Failed to parse bundle '{bundle_path}': {exc}", type(exc).__name__, cause=exc)

     # Load inputs if provided
@@ -246,7 +249,7 @@
     result = asyncio.run(
         _run_pipeline_core(
             pipe_code=pipe_code,  # type: ignore[arg-type]
-            plx_content=plx_content,
+            mthds_content=mthds_content,
             bundle_uri=bundle_path,
             inputs=pipeline_inputs,
             dry_run=dry_run,
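
# Editor's note: a hedged sketch of the bundle-parsing step shared by run_cmd
# and graph_cmd above β€” read the .mthds text, build a bundle blueprint, and
# fall back to the declared main pipe when no pipe code was given. The file
# name is illustrative; the calls and exception types come from this diff.
from pathlib import Path

from pipelex.core.interpreter.exceptions import MthdsDecodeError, PipelexInterpreterError
from pipelex.core.interpreter.interpreter import PipelexInterpreter

mthds_content = Path("my_bundle.mthds").read_text(encoding="utf-8")
try:
    blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(mthds_content=mthds_content)
except (PipelexInterpreterError, MthdsDecodeError) as exc:
    raise SystemExit(f"Failed to parse bundle: {exc}")
pipe_code = blueprint.main_pipe  # may be None if the bundle declares no main pipe
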
""" - result = await validate_bundle(plx_file_path=bundle_path, library_dirs=library_dirs) + result = await validate_bundle(mthds_file_path=bundle_path, library_dirs=library_dirs) validated_pipes = [{"pipe_code": the_pipe.code, "status": "SUCCESS"} for the_pipe in result.pipes] @@ -145,7 +145,7 @@ async def _validate_pipe_in_bundle_core( """ # Validate the bundle to load all its pipes into the library # This ensures all dependencies are available - await validate_bundle(plx_file_path=bundle_path, library_dirs=library_dirs) + await validate_bundle(mthds_file_path=bundle_path, library_dirs=library_dirs) # Now get the specific pipe and dry-run only that one the_pipe = get_required_pipe(pipe_code=pipe_code) @@ -170,7 +170,7 @@ def validate_cmd( ] = None, bundle: Annotated[ str | None, - typer.Option("--bundle", help="Bundle file path (.plx)"), + typer.Option("--bundle", help="Bundle file path (.mthds)"), ] = None, validate_all: Annotated[ bool, @@ -178,7 +178,7 @@ def validate_cmd( ] = False, library_dir: Annotated[ list[str] | None, - typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.plx files)"), + typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.mthds files)"), ] = None, ) -> None: """Validate a pipe, bundle, or all pipes and output JSON results. @@ -187,7 +187,7 @@ def validate_cmd( Examples: pipelex-agent validate my_pipe - pipelex-agent validate my_bundle.plx + pipelex-agent validate my_bundle.mthds pipelex-agent validate --all -L ./my_pipes """ library_dirs = [Path(lib_dir) for lib_dir in library_dir] if library_dir else None diff --git a/pipelex/cli/commands/build/app.py b/pipelex/cli/commands/build/app.py index a135cdaf0..8aa94dc88 100644 --- a/pipelex/cli/commands/build/app.py +++ b/pipelex/cli/commands/build/app.py @@ -13,4 +13,4 @@ build_app.command("output", help="Generate example output representation for a pipe (JSON, Python, or TypeScript)")(generate_output_cmd) build_app.command("pipe", help="Build a Pipelex bundle with one validation/fix loop correcting deterministic issues")(build_pipe_cmd) build_app.command("runner", help="Build the Python code to run a pipe with the necessary inputs")(prepare_runner_cmd) -build_app.command("structures", help="Generate Python structure files from concept definitions in PLX files")(build_structures_command) +build_app.command("structures", help="Generate Python structure files from concept definitions in MTHDS files")(build_structures_command) diff --git a/pipelex/cli/commands/build/inputs_cmd.py b/pipelex/cli/commands/build/inputs_cmd.py index af3d1bbc5..8bfe589c0 100644 --- a/pipelex/cli/commands/build/inputs_cmd.py +++ b/pipelex/cli/commands/build/inputs_cmd.py @@ -41,12 +41,12 @@ async def _generate_inputs_core( Args: pipe_code: The pipe code to generate inputs for. - bundle_path: Path to the bundle file (.plx). + bundle_path: Path to the bundle file (.mthds). output_path: Path to save the generated JSON file. 
""" if bundle_path: try: - validate_bundle_result = await validate_bundle(plx_file_path=bundle_path) + validate_bundle_result = await validate_bundle(mthds_file_path=bundle_path) bundle_blueprint = validate_bundle_result.blueprints[0] if not pipe_code: # No pipe code specified, use main_pipe from bundle @@ -100,7 +100,7 @@ async def _generate_inputs_core( if output_path: final_output_path = output_path elif bundle_path: - # Place inputs.json in the same directory as the PLX file + # Place inputs.json in the same directory as the MTHDS file bundle_dir = bundle_path.parent final_output_path = bundle_dir / DEFAULT_INPUTS_FILE_NAME else: @@ -123,14 +123,14 @@ def generate_inputs_cmd( ] = None, pipe: Annotated[ str | None, - typer.Option("--pipe", help="Pipe code, can be omitted if you specify a bundle (.plx) that declares a main pipe"), + typer.Option("--pipe", help="Pipe code, can be omitted if you specify a bundle (.mthds) that declares a main pipe"), ] = None, library_dir: Annotated[ list[str] | None, typer.Option( "--library-dir", "-L", - help="Directory to search for pipe definitions (.plx files). Can be specified multiple times.", + help="Directory to search for pipe definitions (.mthds files). Can be specified multiple times.", ), ] = None, output_path: Annotated[ @@ -147,8 +147,8 @@ def generate_inputs_cmd( Examples: pipelex build inputs my_pipe - pipelex build inputs my_bundle.plx - pipelex build inputs my_bundle.plx --pipe my_pipe + pipelex build inputs my_bundle.mthds + pipelex build inputs my_bundle.mthds --pipe my_pipe pipelex build inputs my_pipe --output custom_inputs.json pipelex build inputs my_pipe -L ./my_pipes """ @@ -167,7 +167,7 @@ def generate_inputs_cmd( target_path = Path(target) if target_path.is_dir(): typer.secho( - f"Failed to run: '{target}' is a directory. The inputs command requires a .plx file or a pipe code.", + f"Failed to run: '{target}' is a directory. The inputs command requires a .mthds file or a pipe code.", fg=typer.colors.RED, err=True, ) diff --git a/pipelex/cli/commands/build/output_cmd.py b/pipelex/cli/commands/build/output_cmd.py index c54597ee2..f834f2659 100644 --- a/pipelex/cli/commands/build/output_cmd.py +++ b/pipelex/cli/commands/build/output_cmd.py @@ -42,13 +42,13 @@ async def _generate_output_core( Args: pipe_code: The pipe code to generate output for. - bundle_path: Path to the bundle file (.plx). + bundle_path: Path to the bundle file (.mthds). output_path: Path to save the generated file. output_format: The format to generate (JSON, PYTHON, or SCHEMA). 
""" if bundle_path: try: - validate_bundle_result = await validate_bundle(plx_file_path=bundle_path) + validate_bundle_result = await validate_bundle(mthds_file_path=bundle_path) bundle_blueprint = validate_bundle_result.blueprints[0] if not pipe_code: # No pipe code specified, use main_pipe from bundle @@ -102,7 +102,7 @@ async def _generate_output_core( if output_path: final_output_path = output_path elif bundle_path: - # Place output file in the same directory as the PLX file + # Place output file in the same directory as the MTHDS file bundle_dir = Path(bundle_path).parent match output_format: case ConceptRepresentationFormat.JSON: @@ -137,14 +137,14 @@ def generate_output_cmd( ] = None, pipe: Annotated[ str | None, - typer.Option("--pipe", help="Pipe code, can be omitted if you specify a bundle (.plx) that declares a main pipe"), + typer.Option("--pipe", help="Pipe code, can be omitted if you specify a bundle (.mthds) that declares a main pipe"), ] = None, library_dir: Annotated[ list[str] | None, typer.Option( "--library-dir", "-L", - help="Directory to search for pipe definitions (.plx files). Can be specified multiple times.", + help="Directory to search for pipe definitions (.mthds files). Can be specified multiple times.", ), ] = None, output_path: Annotated[ @@ -183,9 +183,9 @@ def generate_output_cmd( pipelex build output my_pipe --format schema - pipelex build output my_bundle.plx + pipelex build output my_bundle.mthds - pipelex build output my_bundle.plx --pipe my_pipe + pipelex build output my_bundle.mthds --pipe my_pipe pipelex build output my_pipe --output custom_output.json @@ -222,7 +222,7 @@ def generate_output_cmd( target_path = Path(target) if target_path.is_dir(): typer.secho( - f"Failed to run: '{target}' is a directory. The output command requires a .plx file or a pipe code.", + f"Failed to run: '{target}' is a directory. 
The output command requires a .mthds file or a pipe code.", fg=typer.colors.RED, err=True, ) diff --git a/pipelex/cli/commands/build/pipe_cmd.py b/pipelex/cli/commands/build/pipe_cmd.py index c0be5206a..cbdc57737 100644 --- a/pipelex/cli/commands/build/pipe_cmd.py +++ b/pipelex/cli/commands/build/pipe_cmd.py @@ -9,7 +9,7 @@ from pipelex import log from pipelex.builder.builder_errors import PipeBuilderError -from pipelex.builder.builder_loop import BuilderLoop +from pipelex.builder.builder_loop import BuilderLoop, maybe_generate_manifest_for_output from pipelex.builder.conventions import DEFAULT_INPUTS_FILE_NAME from pipelex.builder.exceptions import PipelexBundleSpecBlueprintError from pipelex.builder.runner_code import generate_runner_code @@ -22,15 +22,16 @@ handle_model_choice_error, ) from pipelex.config import get_config +from pipelex.core.interpreter.helpers import MTHDS_EXTENSION from pipelex.core.pipes.exceptions import PipeOperatorModelChoiceError from pipelex.core.pipes.variable_multiplicity import parse_concept_with_multiplicity from pipelex.graph.graph_factory import generate_graph_outputs, save_graph_outputs_to_dir from pipelex.hub import get_console, get_report_delegate, get_required_pipe, get_telemetry_manager -from pipelex.language.plx_factory import PlxFactory +from pipelex.language.mthds_factory import MthdsFactory from pipelex.pipe_operators.exceptions import PipeOperatorModelAvailabilityError from pipelex.pipe_run.pipe_run_mode import PipeRunMode from pipelex.pipelex import PACKAGE_VERSION, Pipelex -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner from pipelex.pipeline.validate_bundle import ValidateBundleError from pipelex.system.runtime import IntegrationMode from pipelex.system.telemetry.events import EventProperty @@ -99,7 +100,7 @@ def build_pipe_cmd( ] = False, no_extras: Annotated[ bool, - typer.Option("--no-extras", help="Skip generating inputs.json and runner.py, only generate the PLX file"), + typer.Option("--no-extras", help="Skip generating inputs.json and runner.py, only generate the MTHDS file"), ] = False, bundle_view: Annotated[ bool, @@ -168,43 +169,48 @@ async def run_pipeline(): base_dir = output_dir or builder_config.default_output_dir # Determine output path and whether to generate extras - bundle_file_name = Path(f"{builder_config.default_bundle_file_name}.plx") + bundle_file_name = Path(f"{builder_config.default_bundle_file_name}{MTHDS_EXTENSION}") if no_extras: - # Generate single file: {base_dir}/{name}_01.plx + # Generate single file: {base_dir}/{name}_01.mthds name = output_name or builder_config.default_bundle_file_name - plx_file_path = get_incremental_file_path( + mthds_file_path = get_incremental_file_path( base_path=base_dir, base_name=name, - extension="plx", + extension="mthds", ) extras_output_dir = "" # Not used in no_extras mode else: - # Generate directory with extras: {base_dir}/{name}_01/bundle.plx + extras + # Generate directory with extras: {base_dir}/{name}_01/bundle.mthds + extras dir_name = output_name or builder_config.default_directory_base_name extras_output_dir = get_incremental_directory_path( base_path=base_dir, base_name=dir_name, ) - plx_file_path = Path(extras_output_dir) / bundle_file_name + mthds_file_path = Path(extras_output_dir) / bundle_file_name - # Save the PLX file - ensure_directory_for_file_path(file_path=str(plx_file_path)) + # Save the MTHDS file + ensure_directory_for_file_path(file_path=str(mthds_file_path)) try: - plx_content = 
diff --git a/pipelex/cli/commands/build/pipe_cmd.py b/pipelex/cli/commands/build/pipe_cmd.py
index c0be5206a..cbdc57737 100644
--- a/pipelex/cli/commands/build/pipe_cmd.py
+++ b/pipelex/cli/commands/build/pipe_cmd.py
@@ -9,7 +9,7 @@
 from pipelex import log
 from pipelex.builder.builder_errors import PipeBuilderError
-from pipelex.builder.builder_loop import BuilderLoop
+from pipelex.builder.builder_loop import BuilderLoop, maybe_generate_manifest_for_output
 from pipelex.builder.conventions import DEFAULT_INPUTS_FILE_NAME
 from pipelex.builder.exceptions import PipelexBundleSpecBlueprintError
 from pipelex.builder.runner_code import generate_runner_code
@@ -22,15 +22,16 @@
     handle_model_choice_error,
 )
 from pipelex.config import get_config
+from pipelex.core.interpreter.helpers import MTHDS_EXTENSION
 from pipelex.core.pipes.exceptions import PipeOperatorModelChoiceError
 from pipelex.core.pipes.variable_multiplicity import parse_concept_with_multiplicity
 from pipelex.graph.graph_factory import generate_graph_outputs, save_graph_outputs_to_dir
 from pipelex.hub import get_console, get_report_delegate, get_required_pipe, get_telemetry_manager
-from pipelex.language.plx_factory import PlxFactory
+from pipelex.language.mthds_factory import MthdsFactory
 from pipelex.pipe_operators.exceptions import PipeOperatorModelAvailabilityError
 from pipelex.pipe_run.pipe_run_mode import PipeRunMode
 from pipelex.pipelex import PACKAGE_VERSION, Pipelex
-from pipelex.pipeline.execute import execute_pipeline
+from pipelex.pipeline.runner import PipelexRunner
 from pipelex.pipeline.validate_bundle import ValidateBundleError
 from pipelex.system.runtime import IntegrationMode
 from pipelex.system.telemetry.events import EventProperty
@@ -99,7 +100,7 @@ def build_pipe_cmd(
     ] = False,
     no_extras: Annotated[
         bool,
-        typer.Option("--no-extras", help="Skip generating inputs.json and runner.py, only generate the PLX file"),
+        typer.Option("--no-extras", help="Skip generating inputs.json and runner.py, only generate the MTHDS file"),
     ] = False,
     bundle_view: Annotated[
         bool,
@@ -168,43 +169,48 @@ async def run_pipeline():
     base_dir = output_dir or builder_config.default_output_dir

     # Determine output path and whether to generate extras
-    bundle_file_name = Path(f"{builder_config.default_bundle_file_name}.plx")
+    bundle_file_name = Path(f"{builder_config.default_bundle_file_name}{MTHDS_EXTENSION}")
     if no_extras:
-        # Generate single file: {base_dir}/{name}_01.plx
+        # Generate single file: {base_dir}/{name}_01.mthds
         name = output_name or builder_config.default_bundle_file_name
-        plx_file_path = get_incremental_file_path(
+        mthds_file_path = get_incremental_file_path(
             base_path=base_dir,
             base_name=name,
-            extension="plx",
+            extension="mthds",
         )
         extras_output_dir = ""  # Not used in no_extras mode
     else:
-        # Generate directory with extras: {base_dir}/{name}_01/bundle.plx + extras
+        # Generate directory with extras: {base_dir}/{name}_01/bundle.mthds + extras
         dir_name = output_name or builder_config.default_directory_base_name
         extras_output_dir = get_incremental_directory_path(
             base_path=base_dir,
             base_name=dir_name,
         )
-        plx_file_path = Path(extras_output_dir) / bundle_file_name
+        mthds_file_path = Path(extras_output_dir) / bundle_file_name

-    # Save the PLX file
-    ensure_directory_for_file_path(file_path=str(plx_file_path))
+    # Save the MTHDS file
+    ensure_directory_for_file_path(file_path=str(mthds_file_path))
     try:
-        plx_content = PlxFactory.make_plx_content(blueprint=pipelex_bundle_spec.to_blueprint())
+        mthds_content = MthdsFactory.make_mthds_content(blueprint=pipelex_bundle_spec.to_blueprint())
     except PipelexBundleSpecBlueprintError as exc:
         typer.secho(f"❌ Failed to convert bundle spec to blueprint: {exc}", fg=typer.colors.RED)
         raise typer.Exit(1) from exc

-    save_text_to_path(text=plx_content, path=str(plx_file_path))
-    log.verbose(f"Pipelex bundle saved to: {plx_file_path}")
+    save_text_to_path(text=mthds_content, path=str(mthds_file_path))
+    log.verbose(f"Pipelex bundle saved to: {mthds_file_path}")

     if no_extras:
         end_time = time.time()
         console = get_console()
         console.print(f"\n[green]βœ“[/green] [bold]Pipeline built successfully ({end_time - start_time:.1f}s)[/bold]")
-        console.print(f"   Output: {plx_file_path}")
+        console.print(f"   Output: {mthds_file_path}")
         return

+    # Generate METHODS.toml if multiple domains exist in output dir
+    manifest_path = maybe_generate_manifest_for_output(output_dir=Path(extras_output_dir))
+    if manifest_path:
+        log.verbose(f"Package manifest generated: {manifest_path}")
+
     # Generate extras (inputs and runner)
     main_pipe_code = pipelex_bundle_spec.main_pipe
     domain_code = pipelex_bundle_spec.domain
@@ -293,12 +299,15 @@

     # pass empty library_dirs to avoid loading any libraries set at env var or instance level:
     # we don't want any other pipeline to interfere with the pipeline we just built
-    built_pipe_output = await execute_pipeline(
-        plx_content=plx_content,
+    built_runner = PipelexRunner(
         pipe_run_mode=PipeRunMode.DRY,
         execution_config=built_pipe_execution_config,
         library_dirs=[],
     )
+    built_pipe_response = await built_runner.execute_pipeline(
+        mthds_content=mthds_content,
+    )
+    built_pipe_output = built_pipe_response.pipe_output
     if built_pipe_output.graph_spec:
         pipeline_graph_dir = graphs_dir / "pipeline_graph"
         log.verbose(f"Saving pipeline graph for pipe {main_pipe_code} to {pipeline_graph_dir}")
@@ -319,7 +328,7 @@
     console = get_console()
     console.print(f"\n[green]βœ“[/green] [bold]Pipeline built successfully ({end_time - start_time:.1f}s)[/bold]")
     console.print(f"   Output saved to [bold magenta]{extras_output_dir}[/bold magenta]:")
-    console.print(f"   [green]βœ“[/green] bundle.plx β†’ {domain_code} β†’ main pipe [red]{main_pipe_code}[/red]")
+    console.print(f"   [green]βœ“[/green] bundle.mthds β†’ {domain_code} β†’ main pipe [red]{main_pipe_code}[/red]")
     if saved_bundle_view_formats:
         console.print(f"   [green]βœ“[/green] bundle_view: {', '.join(saved_bundle_view_formats)}")
     if saved_structure_names:
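
# Editor's note: a hedged sketch of the serialization path above β€” a bundle
# spec rendered to MTHDS text via MthdsFactory, saved, then METHODS.toml
# generated when the output warrants it. The spec parameter stands in for the
# builder's pipelex_bundle_spec; only the two imported helpers and their
# keyword arguments come from this diff.
from pathlib import Path

from pipelex.builder.builder_loop import maybe_generate_manifest_for_output
from pipelex.language.mthds_factory import MthdsFactory


def save_bundle(pipelex_bundle_spec, output_dir: Path) -> Path | None:
    mthds_content = MthdsFactory.make_mthds_content(blueprint=pipelex_bundle_spec.to_blueprint())
    (output_dir / "bundle.mthds").write_text(mthds_content, encoding="utf-8")
    # Returns the manifest path when METHODS.toml was generated, else None
    return maybe_generate_manifest_for_output(output_dir=output_dir)
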
diff --git a/pipelex/cli/commands/build/runner_cmd.py b/pipelex/cli/commands/build/runner_cmd.py
index 7d52bb3ae..9e3d53956 100644
--- a/pipelex/cli/commands/build/runner_cmd.py
+++ b/pipelex/cli/commands/build/runner_cmd.py
@@ -49,7 +49,7 @@ async def prepare_runner(
     if bundle_path:
         try:
-            validate_bundle_result = await validate_bundle(plx_file_path=bundle_path, library_dirs=library_dirs)
+            validate_bundle_result = await validate_bundle(mthds_file_path=bundle_path, library_dirs=library_dirs)
             all_blueprints.extend(validate_bundle_result.blueprints)
             first_blueprint = validate_bundle_result.blueprints[0]
             if not pipe_code:
@@ -88,7 +88,7 @@
     if output_path:
         final_output_path = output_path
     else:
-        # Place runner in the same directory as the PLX file
+        # Place runner in the same directory as the MTHDS file
         bundle_dir = Path(bundle_path).parent
         final_output_path = bundle_dir / f"run_{pipe_code}.py"
     output_dir = Path(final_output_path).parent
@@ -161,11 +161,11 @@ def prepare_runner_cmd(
     target: Annotated[
         str | None,
-        typer.Argument(help="Bundle file path (.plx)"),
+        typer.Argument(help="Bundle file path (.mthds)"),
     ] = None,
     pipe: Annotated[
         str | None,
-        typer.Option("--pipe", help="Pipe code to use (optional if the .plx declares a main_pipe)"),
+        typer.Option("--pipe", help="Pipe code to use (optional if the .mthds declares a main_pipe)"),
     ] = None,
     output_path: Annotated[
         str | None,
@@ -173,7 +173,7 @@
     ] = None,
     library_dirs: Annotated[
         list[str] | None,
-        typer.Option("--library-dirs", "-L", help="Directories to search for pipe definitions (.plx files). Can be specified multiple times."),
+        typer.Option("--library-dirs", "-L", help="Directories to search for pipe definitions (.mthds files). Can be specified multiple times."),
     ] = None,
 ) -> None:
     """Prepare a Python runner file for a pipe.
@@ -186,9 +186,9 @@
     Custom concept types will have their structure recursively generated.

     Examples:
-        pipelex build runner my_bundle.plx
-        pipelex build runner my_bundle.plx --pipe my_pipe
-        pipelex build runner my_bundle.plx --output runner.py
+        pipelex build runner my_bundle.mthds
+        pipelex build runner my_bundle.mthds --pipe my_pipe
+        pipelex build runner my_bundle.mthds --output runner.py
     """
     # Show help if no target provided
     if target is None:
@@ -201,10 +201,10 @@
     output_path_path = Path(output_path) if output_path else None
     library_dirs_paths = [Path(lib_dir) for lib_dir in library_dirs] if library_dirs else None

-    # Validate: target must be a .plx file
+    # Validate: target must be a .mthds file
     if not is_pipelex_file(target_path):
         typer.secho(
-            f"Failed to run: '{target}' is not a .plx file.",
+            f"Failed to run: '{target}' is not a .mthds file.",
             fg=typer.colors.RED,
             err=True,
         )
diff --git a/pipelex/cli/commands/build/structures_cmd.py b/pipelex/cli/commands/build/structures_cmd.py
index 46692cc83..c979864a0 100644
--- a/pipelex/cli/commands/build/structures_cmd.py
+++ b/pipelex/cli/commands/build/structures_cmd.py
@@ -293,7 +293,7 @@ def generate_structures_from_blueprints(
 def build_structures_command(
     target: Annotated[
         str,
-        typer.Argument(help="Target directory to scan for .plx files, or a specific .plx file"),
+        typer.Argument(help="Target directory to scan for .mthds files, or a specific .mthds file"),
     ],
     output_dir: Annotated[
         str | None,
@@ -304,7 +304,7 @@
         typer.Option(
             "--library-dir",
             "-L",
-            help="Directory to search for pipe definitions (.plx files). Can be specified multiple times.",
+            help="Directory to search for pipe definitions (.mthds files). Can be specified multiple times.",
         ),
     ] = None,
     force: Annotated[
@@ -316,14 +316,14 @@
         ),
     ] = False,
 ) -> None:
-    """Generate Python structure classes from concept definitions in .plx files.
+    """Generate Python structure classes from concept definitions in .mthds files.

     Examples:
-        pipelex build structures my_bundle.plx
+        pipelex build structures my_bundle.mthds
         pipelex build structures ./my_pipes/
-        pipelex build structures my_bundle.plx -o ./generated/
-        pipelex build structures my_bundle.plx -L ./shared_pipes/
-        pipelex build structures my_bundle.plx --force
+        pipelex build structures my_bundle.mthds -o ./generated/
+        pipelex build structures my_bundle.mthds -L ./shared_pipes/
+        pipelex build structures my_bundle.mthds --force
     """

     def _build_structures_cmd():
@@ -337,19 +337,19 @@ def _build_structures_cmd():
         library_dirs_paths, _ = resolve_library_dirs(library_dir)

         # Determine if target is a file or directory
-        is_plx_file = target_path.is_file() and is_pipelex_file(target_path)
+        is_mthds_file = target_path.is_file() and is_pipelex_file(target_path)

         pipelex_instance = make_pipelex_for_cli(context=ErrorContext.BUILD, library_dirs=library_dir)
         try:
-            if is_plx_file:
-                # Single PLX file: output to parent directory
+            if is_mthds_file:
+                # Single MTHDS file: output to parent directory
                 base_dir = target_path.parent
                 output_directory = Path(output_dir) if output_dir else base_dir / "structures"

                 typer.echo(f"πŸ” Loading concepts from bundle: {target_path}")

                 # Load concepts only (no pipes)
-                load_result = load_concepts_only(plx_file_path=target_path, library_dirs=library_dirs_paths)
+                load_result = load_concepts_only(mthds_file_path=target_path, library_dirs=library_dirs_paths)
                 # THIS IS A HACK, while waiting class/func registries to be in libraries.
                 get_class_registry().teardown()
                 get_func_registry().teardown()
@@ -367,9 +367,9 @@
                     skip_existing_check=force,
                 )
             else:
-                # Directory: scan for all PLX files
+                # Directory: scan for all MTHDS files
                 if not target_path.is_dir():
-                    typer.secho(f"❌ Target is not a directory or .plx file: {target_path}", fg=typer.colors.RED, err=True)
+                    typer.secho(f"❌ Target is not a directory or .mthds file: {target_path}", fg=typer.colors.RED, err=True)
                     raise typer.Exit(1)

                 output_directory = Path(output_dir) if output_dir else target_path / "structures"
diff --git a/pipelex/cli/commands/init/backends.py b/pipelex/cli/commands/init/backends.py
index f1cd0875c..06b2d4e06 100644
--- a/pipelex/cli/commands/init/backends.py
+++ b/pipelex/cli/commands/init/backends.py
@@ -3,6 +3,7 @@
 import os
 from typing import Any

+from pipelex.cli.commands.init.ide_extension import suggest_extension_install_if_needed
 from pipelex.cli.commands.init.ui.backends_ui import (
     build_backend_selection_panel,
     display_selected_backends,
@@ -125,6 +126,9 @@ def customize_backends_config(is_first_time_setup: bool = False) -> None:
         is_first_time_setup=is_first_time_setup,
     )

+    # Suggest IDE extension install after backend selection, before gateway terms
+    suggest_extension_install_if_needed(console)
+
     # Check if pipelex_gateway is selected and handle terms acceptance
     if PipelexBackend.GATEWAY in selected_backends:
         gateway_accepted = prompt_gateway_acceptance(console)
diff --git a/pipelex/cli/commands/init/command.py b/pipelex/cli/commands/init/command.py
index cf6407311..e0657cf5c 100644
--- a/pipelex/cli/commands/init/command.py
+++ b/pipelex/cli/commands/init/command.py
@@ -82,6 +82,7 @@ def determine_needs(
     backends_toml_path: str,
     routing_profiles_toml_path: str,
     telemetry_config_path: str,
+    target_config_dir: str | None = None,
 ) -> tuple[bool, bool, bool, bool]:
     """Determine what needs to be initialized based on current state.
@@ -94,11 +95,12 @@
         backends_toml_path: Path to backends.toml file.
         routing_profiles_toml_path: Path to routing_profiles.toml file.
         telemetry_config_path: Path to telemetry config file.
+        target_config_dir: Explicit target .pipelex directory. If None, uses config_manager.pipelex_config_dir.

     Returns:
         Tuple of (needs_config, needs_inference, needs_routing, needs_telemetry) booleans.
     """
-    nb_missing_config_files = init_config(reset=False, dry_run=True) if check_config else 0
+    nb_missing_config_files = init_config(reset=False, dry_run=True, target_dir=target_config_dir) if check_config else 0
     needs_config = check_config and (nb_missing_config_files > 0 or reset)
     needs_inference = check_inference and (not path_exists(backends_toml_path) or reset)
     needs_routing = check_routing and (not path_exists(routing_profiles_toml_path) or reset)
@@ -165,6 +167,7 @@ def execute_initialization(
     backends_toml_path: str,
     telemetry_config_path: str,
     is_first_time_backends_setup: bool,
+    target_config_dir: str | None = None,
 ):
     """Execute the initialization steps.
@@ -180,6 +183,7 @@
         backends_toml_path: Path to backends.toml file.
         telemetry_config_path: Path to telemetry config file.
         is_first_time_backends_setup: Whether backends.toml didn't exist before this run.
+        target_config_dir: Explicit target .pipelex directory. If None, uses config_manager.pipelex_config_dir.
     """

     # Track if backends were just copied during config initialization
@@ -191,7 +195,7 @@
         backends_existed_before = path_exists(backends_toml_path)

         console.print()
-        init_config(reset=reset)
+        init_config(reset=reset, target_dir=target_config_dir)

         # If backends.toml was just created (freshly copied), always prompt for backend selection
         backends_exists_now = path_exists(backends_toml_path)
@@ -215,7 +219,8 @@
         # If reset is True and we didn't already copy via config init, copy the template files
         if reset and not backends_just_copied_during_config:
             template_inference_dir = os.path.join(str(get_kit_configs_dir()), "inference")
-            target_inference_dir = os.path.join(config_manager.pipelex_config_dir, "inference")
+            effective_config_dir = target_config_dir or config_manager.pipelex_config_dir
+            target_inference_dir = os.path.join(effective_config_dir, "inference")

             # Reset backends.toml
             template_backends_path = os.path.join(template_inference_dir, "backends.toml")
@@ -267,7 +272,8 @@

         # If reset is True, copy the template file first
         if reset:
-            routing_profiles_toml_path = os.path.join(config_manager.pipelex_config_dir, "inference", "routing_profiles.toml")
+            effective_config_dir_for_routing = target_config_dir or config_manager.pipelex_config_dir
+            routing_profiles_toml_path = os.path.join(effective_config_dir_for_routing, "inference", "routing_profiles.toml")
             template_routing_path = os.path.join(str(get_kit_configs_dir()), "inference", "routing_profiles.toml")
             shutil.copy2(template_routing_path, routing_profiles_toml_path)
             console.print("βœ… Reset routing_profiles.toml from template")
@@ -339,6 +345,7 @@ def _init_agreement(console: Console) -> None:
 def init_cmd(
     focus: InitFocus = InitFocus.ALL,
     skip_confirmation: bool = False,
+    local: bool = False,
 ):
     """Initialize Pipelex configuration, inference backends, routing, and telemetry.
@@ -348,6 +355,7 @@
     Args:
         focus: What to initialize - 'agreement', 'config', 'inference', 'routing', 'telemetry', or 'all' (default)
         skip_confirmation: If True, skip the confirmation prompt (used when called from doctor --fix)
+        local: If True, create project-level .pipelex/ at the detected project root. Otherwise, create global ~/.pipelex/.
     """
     console = get_console()
@@ -358,7 +366,21 @@
     # Config updates are not yet supported - always reset
     reset = True

-    pipelex_config_dir = config_manager.pipelex_config_dir
+    # Determine target directory
+    if local:
+        # --local: create at project root, fall back to CWD
+        project_root = config_manager.project_root
+        if project_root is not None:
+            target_config_dir = os.path.join(project_root, ".pipelex")
+        else:
+            target_config_dir = os.path.join(os.getcwd(), ".pipelex")
+    else:
+        # Default: create global config at ~/.pipelex/
+        target_config_dir = config_manager.global_config_dir
+    console.print(f"[dim]Target directory: {target_config_dir}[/dim]")
+
+    pipelex_config_dir = target_config_dir
     telemetry_config_path = os.path.join(pipelex_config_dir, TELEMETRY_CONFIG_FILE_NAME)
     backends_toml_path = os.path.join(pipelex_config_dir, "inference", "backends.toml")
     routing_profiles_toml_path = os.path.join(pipelex_config_dir, "inference", "routing_profiles.toml")
@@ -382,6 +404,7 @@
         backends_toml_path=backends_toml_path,
         routing_profiles_toml_path=routing_profiles_toml_path,
         telemetry_config_path=telemetry_config_path,
+        target_config_dir=pipelex_config_dir,
     )

     # Show info message if config already exists
@@ -418,6 +441,7 @@
         backends_toml_path=backends_toml_path,
         telemetry_config_path=telemetry_config_path,
         is_first_time_backends_setup=is_first_time_backends_setup,
+        target_config_dir=pipelex_config_dir,
     )

     except typer.Exit:
""" config_template_dir = str(get_kit_configs_dir()) - target_config_dir = config_manager.pipelex_config_dir + target_config_dir = target_dir or config_manager.pipelex_config_dir os.makedirs(target_config_dir, exist_ok=True) diff --git a/pipelex/cli/commands/init/ide_extension.py b/pipelex/cli/commands/init/ide_extension.py new file mode 100644 index 000000000..4d2d6cefd --- /dev/null +++ b/pipelex/cli/commands/init/ide_extension.py @@ -0,0 +1,103 @@ +"""IDE extension detection and installation for Pipelex.""" + +import shutil +import subprocess # noqa: S404 +from pathlib import Path + +from rich.console import Console +from rich.prompt import Confirm + +EXTENSION_FOLDER_PATTERN = "pipelex.pipelex-*" +EXTENSION_ID = "Pipelex.pipelex" +EXTENSION_URL = "https://open-vsx.org/extension/Pipelex/pipelex" + +_IDE_COMMANDS = { + "VS Code": "code", + "Cursor": "cursor", +} + +_EXTENSIONS_DIRS = { + "VS Code": Path.home() / ".vscode" / "extensions", + "Cursor": Path.home() / ".cursor" / "extensions", +} + + +def _get_ides_with_extension_installed() -> set[str]: + """Return the set of IDE names that already have the Pipelex extension installed.""" + installed_in: set[str] = set() + for ide_name, extensions_dir in _EXTENSIONS_DIRS.items(): + if extensions_dir.is_dir() and list(extensions_dir.glob(EXTENSION_FOLDER_PATTERN)): + installed_in.add(ide_name) + return installed_in + + +def _get_available_ide_commands() -> dict[str, str]: + """Return a dict of IDE name -> CLI command for IDEs whose CLI is available on PATH.""" + return {ide_name: cmd for ide_name, cmd in _IDE_COMMANDS.items() if shutil.which(cmd) is not None} + + +def _install_extension(ide_name: str, cmd: str, console: Console) -> bool: + """Install the Pipelex extension for a given IDE. + + Args: + ide_name: Human-readable IDE name (e.g. "VS Code"). + cmd: CLI command for the IDE (e.g. "code"). + console: Rich Console instance for output. + + Returns: + True if installation succeeded, False otherwise. + """ + try: + result = subprocess.run( # noqa: S603 + [cmd, "--install-extension", EXTENSION_ID], + capture_output=True, + text=True, + timeout=120, + check=False, + ) + if result.returncode == 0: + console.print(f" [green]βœ“ Installed in {ide_name}[/green]") + return True + else: + console.print(f" [red]βœ— Failed to install in {ide_name}: {result.stderr.strip()}[/red]") + return False + except subprocess.TimeoutExpired: + console.print(f" [red]βœ— Installation timed out for {ide_name}[/red]") + return False + except OSError as exc: + console.print(f" [red]βœ— Could not run '{cmd}' for {ide_name}: {exc}[/red]") + return False + + +def suggest_extension_install_if_needed(console: Console) -> None: + """Check whether the Pipelex IDE extension is installed and offer to install it. + + Checks VS Code and Cursor extension directories for a folder matching + ``pipelex.pipelex-*``. If the extension is not found in any IDE, prints a + suggestion and offers to install it via the IDE CLI commands if available. 
+ """ + installed_in = _get_ides_with_extension_installed() + available_commands = _get_available_ide_commands() + + # Determine which IDEs still need the extension + ides_needing_install = {ide_name: cmd for ide_name, cmd in available_commands.items() if ide_name not in installed_in} + + if not ides_needing_install: + # Either already installed everywhere or no IDE CLI available + return + + ide_names = " and ".join(ides_needing_install.keys()) + console.print(f"πŸ’‘ The Pipelex extension for {ide_names} provides syntax highlighting for .pplx files.") + console.print(f" More info: [cyan]{EXTENSION_URL}[/cyan]") + + install = Confirm.ask( + f"[bold]Install the Pipelex extension in {ide_names}?[/bold]", + console=console, + default=True, + ) + + if install: + for ide_name, cmd in ides_needing_install.items(): + _install_extension(ide_name, cmd, console) + else: + console.print("[dim]You can install it later from the IDE marketplace or the link above.[/dim]") diff --git a/pipelex/cli/commands/init/ui/general_ui.py b/pipelex/cli/commands/init/ui/general_ui.py index 2c22008f8..2f483dd87 100644 --- a/pipelex/cli/commands/init/ui/general_ui.py +++ b/pipelex/cli/commands/init/ui/general_ui.py @@ -23,6 +23,7 @@ def build_initialization_panel(needs_config: bool, needs_inference: bool, needs_ message_parts.append("β€’ [yellow]Reset and reconfigure[/yellow] configuration files in [cyan].pipelex/[/cyan]") if needs_inference: message_parts.append("β€’ [yellow]Reset and reconfigure[/yellow] inference backends") + message_parts.append("β€’ Suggest IDE extension for [cyan].pplx[/cyan] syntax highlighting") if needs_routing: message_parts.append("β€’ [yellow]Reset and reconfigure[/yellow] routing profile") if needs_telemetry: @@ -32,6 +33,7 @@ def build_initialization_panel(needs_config: bool, needs_inference: bool, needs_ message_parts.append("β€’ Create required configuration files in [cyan].pipelex/[/cyan]") if needs_inference: message_parts.append("β€’ Ask you to choose your inference backends") + message_parts.append("β€’ Suggest IDE extension for [cyan].pplx[/cyan] syntax highlighting") if needs_routing: message_parts.append("β€’ Ask you to configure your routing profile") if needs_telemetry: diff --git a/pipelex/client/__init__.py b/pipelex/cli/commands/pkg/__init__.py similarity index 100% rename from pipelex/client/__init__.py rename to pipelex/cli/commands/pkg/__init__.py diff --git a/pipelex/cli/commands/pkg/add_cmd.py b/pipelex/cli/commands/pkg/add_cmd.py new file mode 100644 index 000000000..69533e008 --- /dev/null +++ b/pipelex/cli/commands/pkg/add_cmd.py @@ -0,0 +1,95 @@ +import re +from pathlib import Path + +import typer + +from pipelex.core.packages.discovery import MANIFEST_FILENAME +from pipelex.core.packages.exceptions import ManifestError +from pipelex.core.packages.manifest import PackageDependency +from pipelex.core.packages.manifest_parser import parse_methods_toml, serialize_manifest_to_toml +from pipelex.hub import get_console + + +def derive_alias_from_address(address: str) -> str: + """Derive a snake_case alias from a package address. + + Takes the last path segment and converts hyphens/dots to underscores. + + Args: + address: The package address (e.g. "github.com/org/my-package") + + Returns: + A snake_case alias (e.g. 
"my_package") + """ + last_segment = address.rstrip("/").rsplit("/", maxsplit=1)[-1] + # Replace hyphens and dots with underscores, lowercase + alias = re.sub(r"[-.]", "_", last_segment).lower() + # Remove any non-alphanumeric/underscore characters + alias = re.sub(r"[^a-z0-9_]", "", alias) + # Remove leading/trailing underscores + alias = alias.strip("_") + return alias or "dep" + + +def do_pkg_add( + address: str, + alias: str | None = None, + version: str = "0.1.0", + path: str | None = None, +) -> None: + """Add a dependency to METHODS.toml. + + Args: + address: The package address (e.g. "github.com/org/repo") + alias: The dependency alias (auto-derived from address if not provided) + version: The version constraint + path: Optional local filesystem path + """ + console = get_console() + cwd = Path.cwd() + manifest_path = cwd / MANIFEST_FILENAME + + # Check that METHODS.toml exists + if not manifest_path.exists(): + console.print(f"[red]{MANIFEST_FILENAME} not found in current directory.[/red]") + console.print("Run [bold]pipelex pkg init[/bold] first to create a manifest.") + raise typer.Exit(code=1) + + # Parse existing manifest + content = manifest_path.read_text(encoding="utf-8") + try: + manifest = parse_methods_toml(content) + except ManifestError as exc: + console.print(f"[red]Could not parse {MANIFEST_FILENAME}: {exc.message}[/red]") + raise typer.Exit(code=1) from exc + + # Auto-derive alias if not provided + if alias is None: + alias = derive_alias_from_address(address) + console.print(f"[dim]Auto-derived alias: {alias}[/dim]") + + # Check alias uniqueness + existing_aliases = {dep.alias for dep in manifest.dependencies} + if alias in existing_aliases: + console.print(f"[red]Dependency alias '{alias}' already exists in {MANIFEST_FILENAME}.[/red]") + raise typer.Exit(code=1) + + # Create and validate the dependency + try: + dep = PackageDependency( + address=address, + version=version, + alias=alias, + path=path, + ) + except ValueError as exc: + console.print(f"[red]Invalid dependency: {exc}[/red]") + raise typer.Exit(code=1) from exc + + # Add to manifest and write back + manifest.dependencies.append(dep) + toml_content = serialize_manifest_to_toml(manifest) + manifest_path.write_text(toml_content, encoding="utf-8") + + path_info = f" (path: {path})" if path else "" + console.print(f"[green]Added dependency '{alias}' -> {address} @ {version}{path_info}[/green]") diff --git a/pipelex/cli/commands/pkg/app.py b/pipelex/cli/commands/pkg/app.py new file mode 100644 index 000000000..dff477633 --- /dev/null +++ b/pipelex/cli/commands/pkg/app.py @@ -0,0 +1,180 @@ +from typing import Annotated + +import typer + +from pipelex.cli.commands.pkg.add_cmd import do_pkg_add +from pipelex.cli.commands.pkg.graph_cmd import do_pkg_graph +from pipelex.cli.commands.pkg.index_cmd import do_pkg_index +from pipelex.cli.commands.pkg.init_cmd import do_pkg_init +from pipelex.cli.commands.pkg.inspect_cmd import do_pkg_inspect +from pipelex.cli.commands.pkg.install_cmd import do_pkg_install +from pipelex.cli.commands.pkg.list_cmd import do_pkg_list +from pipelex.cli.commands.pkg.lock_cmd import do_pkg_lock +from pipelex.cli.commands.pkg.publish_cmd import do_pkg_publish +from pipelex.cli.commands.pkg.search_cmd import do_pkg_search +from pipelex.cli.commands.pkg.update_cmd import do_pkg_update + +pkg_app = typer.Typer( + no_args_is_help=True, +) + + +@pkg_app.command("init", help="Initialize a METHODS.toml package manifest from .mthds files in the current directory") +def pkg_init_cmd( + force: 
Annotated[ + bool, + typer.Option("--force", "-f", help="Overwrite existing METHODS.toml"), + ] = False, +) -> None: + """Scan .mthds files and generate a skeleton METHODS.toml.""" + do_pkg_init(force=force) + + +@pkg_app.command("list", help="Display the package manifest (METHODS.toml) for the current directory") +def pkg_list_cmd() -> None: + """Show the package manifest if one exists.""" + do_pkg_list() + + +@pkg_app.command("add", help="Add a dependency to METHODS.toml") +def pkg_add_cmd( + address: Annotated[ + str, + typer.Argument(help="Package address (e.g. 'github.com/org/repo')"), + ], + alias: Annotated[ + str | None, + typer.Option("--alias", "-a", help="Dependency alias (auto-derived from address if not provided)"), + ] = None, + version: Annotated[ + str, + typer.Option("--version", "-v", help="Version constraint"), + ] = "0.1.0", + path: Annotated[ + str | None, + typer.Option("--path", "-p", help="Local filesystem path to the dependency"), + ] = None, +) -> None: + """Add a dependency to the package manifest.""" + do_pkg_add(address=address, alias=alias, version=version, path=path) + + +@pkg_app.command("lock", help="Resolve dependencies and generate methods.lock") +def pkg_lock_cmd() -> None: + """Resolve all dependencies and write a lock file.""" + do_pkg_lock() + + +@pkg_app.command("install", help="Install dependencies from methods.lock") +def pkg_install_cmd() -> None: + """Fetch packages recorded in the lock file.""" + do_pkg_install() + + +@pkg_app.command("update", help="Re-resolve dependencies and update methods.lock") +def pkg_update_cmd() -> None: + """Fresh resolve of all dependencies and rewrite the lock file.""" + do_pkg_update() + + +@pkg_app.command("index", help="Build and display the package index") +def pkg_index_cmd( + cache: Annotated[ + bool, + typer.Option("--cache", "-c", help="Index cached packages instead of current project"), + ] = False, +) -> None: + """Build and display the package index.""" + do_pkg_index(cache=cache) + + +@pkg_app.command("search", help="Search the package index for concepts and pipes") +def pkg_search_cmd( + query: Annotated[ + str | None, + typer.Argument(help="Search term (case-insensitive substring match)"), + ] = None, + domain: Annotated[ + str | None, + typer.Option("--domain", "-d", help="Filter to specific domain"), + ] = None, + concept: Annotated[ + bool, + typer.Option("--concept", help="Show only matching concepts"), + ] = False, + pipe: Annotated[ + bool, + typer.Option("--pipe", help="Show only matching pipes"), + ] = False, + cache: Annotated[ + bool, + typer.Option("--cache", "-c", help="Search cached packages"), + ] = False, + accepts: Annotated[ + str | None, + typer.Option("--accepts", help="Find pipes that accept this concept (type-compatible search)"), + ] = None, + produces: Annotated[ + str | None, + typer.Option("--produces", help="Find pipes that produce this concept (type-compatible search)"), + ] = None, +) -> None: + """Search the package index for concepts and pipes matching a query.""" + do_pkg_search(query=query, domain=domain, concept_only=concept, pipe_only=pipe, cache=cache, accepts=accepts, produces=produces) + + +@pkg_app.command("inspect", help="Display detailed information about a package") +def pkg_inspect_cmd( + address: Annotated[ + str, + typer.Argument(help="Package address to inspect"), + ], + cache: Annotated[ + bool, + typer.Option("--cache", "-c", help="Look in cache"), + ] = False, +) -> None: + """Display detailed information about a single package.""" + 
do_pkg_inspect(address=address, cache=cache) + + +@pkg_app.command("graph", help="Query the know-how graph for concept/pipe relationships") +def pkg_graph_cmd( + from_concept: Annotated[ + str | None, + typer.Option("--from", "-f", help="Concept ID (package::concept_ref) β€” find pipes that accept it"), + ] = None, + to_concept: Annotated[ + str | None, + typer.Option("--to", "-t", help="Concept ID β€” find pipes that produce it"), + ] = None, + check: Annotated[ + str | None, + typer.Option("--check", help="Two pipe keys comma-separated β€” check compatibility"), + ] = None, + max_depth: Annotated[ + int, + typer.Option("--max-depth", "-m", help="Max chain depth for --from + --to together"), + ] = 3, + cache: Annotated[ + bool, + typer.Option("--cache", "-c", help="Use cached packages"), + ] = False, + compose: Annotated[ + bool, + typer.Option("--compose", help="Show MTHDS composition template (requires --from and --to)"), + ] = False, +) -> None: + """Query the know-how graph for concept/pipe relationships.""" + do_pkg_graph(from_concept=from_concept, to_concept=to_concept, check=check, max_depth=max_depth, cache=cache, compose=compose) + + +@pkg_app.command("publish", help="Validate package readiness for distribution") +def pkg_publish_cmd( + tag: Annotated[ + bool, + typer.Option("--tag", help="Create git tag v{version} locally on success"), + ] = False, +) -> None: + """Validate that the package is ready for distribution.""" + do_pkg_publish(tag=tag) diff --git a/pipelex/cli/commands/pkg/graph_cmd.py b/pipelex/cli/commands/pkg/graph_cmd.py new file mode 100644 index 000000000..80de531f1 --- /dev/null +++ b/pipelex/cli/commands/pkg/graph_cmd.py @@ -0,0 +1,249 @@ +from pathlib import Path + +import typer +from rich import box +from rich.console import Console +from rich.table import Table + +from pipelex.core.packages.exceptions import GraphBuildError, IndexBuildError +from pipelex.core.packages.graph.chain_formatter import format_chain_as_mthds_snippet +from pipelex.core.packages.graph.graph_builder import build_know_how_graph +from pipelex.core.packages.graph.models import ConceptId, KnowHowGraph, PipeNode +from pipelex.core.packages.graph.query_engine import KnowHowQueryEngine +from pipelex.core.packages.index.index_builder import build_index_from_cache, build_index_from_project +from pipelex.hub import get_console + + +def _parse_concept_id(raw: str) -> ConceptId: + """Parse a concept ID string in the format 'package_address::concept_ref'. + + Args: + raw: String like '__native__::native.Text' or 'github.com/org/repo::domain.Concept' + + Returns: + A ConceptId instance. + + Raises: + typer.Exit: If the format is invalid. + """ + console = get_console() + + if "::" not in raw: + console.print(f"[red]Invalid concept format: '{raw}'[/red]") + console.print("[dim]Expected format: package_address::concept_ref (e.g. __native__::native.Text)[/dim]") + raise typer.Exit(code=1) + + if raw.count("::") > 1: + console.print(f"[red]Invalid concept format: '{raw}' contains multiple '::' separators.[/red]") + console.print("[dim]Expected format: package_address::concept_ref (e.g. 
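
# Editor's note: a hedged sketch of exercising the pkg sub-app defined above
# via typer's test runner. Command names and options come straight from the
# registrations in this diff; the address is illustrative, and without a
# METHODS.toml in the working directory the command exits with code 1.
from typer.testing import CliRunner

from pipelex.cli.commands.pkg.app import pkg_app

runner = CliRunner()
result = runner.invoke(pkg_app, ["add", "github.com/org/repo", "--version", "0.2.0"])
print(result.exit_code, result.output)
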
diff --git a/pipelex/cli/commands/pkg/graph_cmd.py b/pipelex/cli/commands/pkg/graph_cmd.py
new file mode 100644
index 000000000..80de531f1
--- /dev/null
+++ b/pipelex/cli/commands/pkg/graph_cmd.py
@@ -0,0 +1,249 @@
+from pathlib import Path
+
+import typer
+from rich import box
+from rich.console import Console
+from rich.table import Table
+
+from pipelex.core.packages.exceptions import GraphBuildError, IndexBuildError
+from pipelex.core.packages.graph.chain_formatter import format_chain_as_mthds_snippet
+from pipelex.core.packages.graph.graph_builder import build_know_how_graph
+from pipelex.core.packages.graph.models import ConceptId, KnowHowGraph, PipeNode
+from pipelex.core.packages.graph.query_engine import KnowHowQueryEngine
+from pipelex.core.packages.index.index_builder import build_index_from_cache, build_index_from_project
+from pipelex.hub import get_console
+
+
+def _parse_concept_id(raw: str) -> ConceptId:
+    """Parse a concept ID string in the format 'package_address::concept_ref'.
+
+    Args:
+        raw: String like '__native__::native.Text' or 'github.com/org/repo::domain.Concept'
+
+    Returns:
+        A ConceptId instance.
+
+    Raises:
+        typer.Exit: If the format is invalid.
+    """
+    console = get_console()
+
+    if "::" not in raw:
+        console.print(f"[red]Invalid concept format: '{raw}'[/red]")
+        console.print("[dim]Expected format: package_address::concept_ref (e.g. __native__::native.Text)[/dim]")
+        raise typer.Exit(code=1)
+
+    if raw.count("::") > 1:
+        console.print(f"[red]Invalid concept format: '{raw}' contains multiple '::' separators.[/red]")
+        console.print("[dim]Expected format: package_address::concept_ref (e.g. __native__::native.Text)[/dim]")
+        raise typer.Exit(code=1)
+
+    separator_index = raw.index("::")
+    package_address = raw[:separator_index]
+    concept_ref = raw[separator_index + 2 :]
+
+    if not package_address or not concept_ref:
+        console.print(f"[red]Invalid concept format: '{raw}' β€” both package_address and concept_ref must be non-empty.[/red]")
+        console.print("[dim]Expected format: package_address::concept_ref (e.g. __native__::native.Text)[/dim]")
+        raise typer.Exit(code=1)
+
+    return ConceptId(package_address=package_address, concept_ref=concept_ref)
+
+
+def do_pkg_graph(
+    from_concept: str | None = None,
+    to_concept: str | None = None,
+    check: str | None = None,
+    max_depth: int = 3,
+    cache: bool = False,
+    compose: bool = False,
+) -> None:
+    """Query the know-how graph for concept/pipe relationships.
+
+    Args:
+        from_concept: Concept ID to find pipes that accept it.
+        to_concept: Concept ID to find pipes that produce it.
+        check: Two pipe keys comma-separated to check compatibility.
+        max_depth: Max chain depth for --from + --to together.
+        cache: Use cached packages instead of the current project.
+        compose: Show MTHDS composition template (requires --from and --to).
+    """
+    console = get_console()
+
+    if compose and (not from_concept or not to_concept):
+        console.print("[red]--compose requires both --from and --to.[/red]")
+        raise typer.Exit(code=1)
+
+    if not from_concept and not to_concept and not check:
+        console.print("[red]Please specify at least one of --from, --to, or --check.[/red]")
+        console.print("[dim]Run 'pipelex pkg graph --help' for usage.[/dim]")
+        raise typer.Exit(code=1)
+
+    try:
+        if cache:
+            index = build_index_from_cache()
+        else:
+            index = build_index_from_project(Path.cwd())
+    except IndexBuildError as exc:
+        console.print(f"[red]Index build error: {exc}[/red]")
+        raise typer.Exit(code=1) from exc
+
+    if not index.entries:
+        console.print("[yellow]No packages found.[/yellow]")
+        raise typer.Exit(code=1)
+
+    try:
+        graph = build_know_how_graph(index)
+    except GraphBuildError as exc:
+        console.print(f"[red]Graph build error: {exc}[/red]")
+        raise typer.Exit(code=1) from exc
+
+    engine = KnowHowQueryEngine(graph)
+
+    if check:
+        _handle_check(console, engine, check)
+    elif from_concept and to_concept:
+        _handle_from_to(console, engine, graph, from_concept, to_concept, max_depth, compose)
+    elif from_concept:
+        _handle_from(console, engine, from_concept)
+    elif to_concept:
+        _handle_to(console, engine, to_concept)
+
+
+def _handle_from(console: Console, engine: KnowHowQueryEngine, raw_concept: str) -> None:
+    """Find pipes that accept the given concept."""
+    concept_id = _parse_concept_id(raw_concept)
+    pipes = engine.query_what_can_i_do(concept_id)
+
+    if not pipes:
+        console.print(f"[yellow]No pipes accept concept '{raw_concept}'.[/yellow]")
+        return
+
+    table = Table(title=f"Pipes accepting {raw_concept}", box=box.ROUNDED, show_header=True)
+    table.add_column("Package", style="cyan")
+    table.add_column("Pipe")
+    table.add_column("Type")
+    table.add_column("Output")
+    table.add_column("Exported")
+
+    for pipe_node in pipes:
+        exported_str = "[green]yes[/green]" if pipe_node.is_exported else "[dim]no[/dim]"
+        table.add_row(
+            pipe_node.package_address,
+            pipe_node.pipe_code,
+            pipe_node.pipe_type,
+            pipe_node.output_concept_id.concept_ref,
+            exported_str,
+        )
+
+    console.print(table)
+
+
+def _handle_to(console: Console, engine: KnowHowQueryEngine, raw_concept: str) -> None:
+    """Find pipes that produce the given concept."""
+    concept_id = _parse_concept_id(raw_concept)
+    pipes = engine.query_what_produces(concept_id)
+
+    if not pipes:
+        console.print(f"[yellow]No pipes produce concept '{raw_concept}'.[/yellow]")
+        return
+
+    table = Table(title=f"Pipes producing {raw_concept}", box=box.ROUNDED, show_header=True)
+    table.add_column("Package", style="cyan")
+    table.add_column("Pipe")
+    table.add_column("Type")
+    table.add_column("Inputs")
+    table.add_column("Exported")
+
+    for pipe_node in pipes:
+        inputs_str = ", ".join(f"{key}: {val.concept_ref}" for key, val in pipe_node.input_concept_ids.items())
+        exported_str = "[green]yes[/green]" if pipe_node.is_exported else "[dim]no[/dim]"
+        table.add_row(
+            pipe_node.package_address,
+            pipe_node.pipe_code,
+            pipe_node.pipe_type,
+            inputs_str or "[dim]-[/dim]",
+            exported_str,
+        )
+
+    console.print(table)
+
+
+def _handle_from_to(
+    console: Console,
+    engine: KnowHowQueryEngine,
+    graph: KnowHowGraph,
+    raw_from: str,
+    raw_to: str,
+    max_depth: int,
+    compose: bool,
+) -> None:
+    """Find pipe chains from input concept to output concept."""
+    from_id = _parse_concept_id(raw_from)
+    to_id = _parse_concept_id(raw_to)
+    chains = engine.query_i_have_i_need(from_id, to_id, max_depth=max_depth)
+
+    if not chains:
+        console.print(f"[yellow]No pipe chains found from '{raw_from}' to '{raw_to}' (max depth {max_depth}).[/yellow]")
+        return
+
+    if compose:
+        _print_compose_output(console, graph, chains, from_id, to_id)
+    else:
+        console.print(f"[bold]Pipe chains from {raw_from} to {raw_to}:[/bold]\n")
+        for chain_index, chain in enumerate(chains, start=1):
+            steps = " -> ".join(chain)
+            console.print(f"  {chain_index}. {steps}")
+        console.print(f"\n[dim]{len(chains)} chain(s) found.[/dim]")
+
+
+def _print_compose_output(
+    console: Console,
+    graph: KnowHowGraph,
+    chains: list[list[str]],
+    from_id: ConceptId,
+    to_id: ConceptId,
+) -> None:
+    """Print MTHDS composition templates for discovered chains."""
+    multiple = len(chains) > 1
+
+    for chain_index, chain in enumerate(chains, start=1):
+        pipe_nodes: list[PipeNode] = []
+        for node_key in chain:
+            pipe_node = graph.get_pipe_node(node_key)
+            if pipe_node is not None:
+                pipe_nodes.append(pipe_node)
+
+        snippet = format_chain_as_mthds_snippet(pipe_nodes, from_id, to_id)
+        if not snippet:
+            continue
+
+        if multiple:
+            console.print(f"[bold]Chain {chain_index} of {len(chains)}:[/bold]")
+        console.print(snippet)
+        if multiple and chain_index < len(chains):
+            console.print()
+
+
+def _handle_check(console: Console, engine: KnowHowQueryEngine, check_arg: str) -> None:
+    """Check compatibility between two pipes."""
+    parts = check_arg.split(",")
+    if len(parts) != 2:
+        console.print("[red]--check requires exactly two pipe keys separated by a comma.[/red]")
+        console.print("[dim]Example: --check 'pkg::pipe_a,pkg::pipe_b'[/dim]")
+        raise typer.Exit(code=1)
+
+    source_key = parts[0].strip()
+    target_key = parts[1].strip()
+
+    if not source_key or not target_key:
+        console.print("[red]--check requires two non-empty pipe keys separated by a comma.[/red]")
+        console.print("[dim]Example: --check 'pkg::pipe_a,pkg::pipe_b'[/dim]")
+        raise typer.Exit(code=1)
+
+    compatible_params = engine.check_compatibility(source_key, target_key)
+
+    if compatible_params:
+        console.print(f"[green]Compatible![/green] Output of '{source_key}' can feed into '{target_key}' via: {', '.join(compatible_params)}")
+    else:
+        console.print(f"[yellow]Not compatible.[/yellow] Output of '{source_key}' does not match any input of '{target_key}'.")
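
# Editor's note: illustrative inputs for _parse_concept_id above. The accepted
# format is package_address::concept_ref with exactly one '::' separator;
# malformed inputs exit with code 1 rather than raising a parse error.
from pipelex.cli.commands.pkg.graph_cmd import _parse_concept_id

concept = _parse_concept_id("__native__::native.Text")
assert concept.package_address == "__native__"
assert concept.concept_ref == "native.Text"
# "native.Text" (no separator) or "a::b::c" (two separators) would exit(1)
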
a/pipelex/cli/commands/pkg/index_cmd.py b/pipelex/cli/commands/pkg/index_cmd.py new file mode 100644 index 000000000..d57f067e4 --- /dev/null +++ b/pipelex/cli/commands/pkg/index_cmd.py @@ -0,0 +1,61 @@ +from pathlib import Path + +import typer +from rich import box +from rich.table import Table + +from pipelex.core.packages.exceptions import IndexBuildError +from pipelex.core.packages.index.index_builder import build_index_from_cache, build_index_from_project +from pipelex.hub import get_console + + +def do_pkg_index(cache: bool = False) -> None: + """Build and display the package index. + + Args: + cache: If True, index cached packages instead of the current project. + """ + console = get_console() + + try: + if cache: + index = build_index_from_cache() + else: + index = build_index_from_project(Path.cwd()) + except IndexBuildError as exc: + console.print(f"[red]Index build error: {exc}[/red]") + raise typer.Exit(code=1) from exc + + if not index.entries: + console.print("[yellow]No packages found to index.[/yellow]") + raise typer.Exit(code=1) + + has_display_name = any(entry.display_name for entry in index.entries.values()) + + table = Table(title="Package Index", box=box.ROUNDED, show_header=True) + table.add_column("Address", style="cyan") + if has_display_name: + table.add_column("Display Name") + table.add_column("Version") + table.add_column("Description") + table.add_column("Domains", justify="right") + table.add_column("Concepts", justify="right") + table.add_column("Pipes", justify="right") + + for entry in index.entries.values(): + row: list[str] = [entry.address] + if has_display_name: + row.append(entry.display_name or "") + row.extend( + [ + entry.version, + entry.description, + str(len(entry.domains)), + str(len(entry.concepts)), + str(len(entry.pipes)), + ] + ) + table.add_row(*row) + + console.print(table) + console.print(f"\n[dim]{len(index.entries)} package(s) indexed.[/dim]") diff --git a/pipelex/cli/commands/pkg/init_cmd.py b/pipelex/cli/commands/pkg/init_cmd.py new file mode 100644 index 000000000..ce957c356 --- /dev/null +++ b/pipelex/cli/commands/pkg/init_cmd.py @@ -0,0 +1,62 @@ +from pathlib import Path + +import typer + +from pipelex.core.packages.bundle_scanner import build_domain_exports_from_scan, scan_bundles_for_domain_info +from pipelex.core.packages.discovery import MANIFEST_FILENAME +from pipelex.core.packages.manifest import MthdsPackageManifest +from pipelex.core.packages.manifest_parser import serialize_manifest_to_toml +from pipelex.hub import get_console + + +def do_pkg_init(force: bool = False) -> None: + """Scan .mthds files in the current directory and generate a METHODS.toml skeleton. 
+ + Args: + force: If True, overwrite an existing METHODS.toml + """ + console = get_console() + cwd = Path.cwd() + manifest_path = cwd / MANIFEST_FILENAME + + # Check if manifest already exists + if manifest_path.exists() and not force: + console.print(f"[red]METHODS.toml already exists at {manifest_path}[/red]") + console.print("Use --force to overwrite.") + raise typer.Exit(code=1) + + # Scan for .mthds files + mthds_files = sorted(cwd.rglob("*.mthds")) + if not mthds_files: + console.print("[red]No .mthds files found in the current directory.[/red]") + raise typer.Exit(code=1) + + # Parse each bundle header to extract domain and main_pipe + domain_pipes, domain_main_pipes, _blueprints, errors = scan_bundles_for_domain_info(mthds_files) + + if errors: + console.print("[yellow]Some files could not be parsed:[/yellow]") + for error in errors: + console.print(f" {error}") + + # Build exports from collected domain/pipe data, placing main_pipe first + exports = build_domain_exports_from_scan(domain_pipes, domain_main_pipes) + + # Generate manifest with placeholder address + dir_name = cwd.name.replace("-", "_").replace(" ", "_").lower() + manifest = MthdsPackageManifest( + address=f"example.com/yourorg/{dir_name}", + version="0.1.0", + description=f"Package generated from {len(mthds_files)} .mthds file(s)", + exports=exports, + ) + + # Serialize and write + toml_content = serialize_manifest_to_toml(manifest) + manifest_path.write_text(toml_content, encoding="utf-8") + + console.print(f"[green]Created {MANIFEST_FILENAME}[/green] with:") + console.print(f" Domains: {len(domain_pipes)}") + console.print(f" Total pipes: {sum(len(pipes) for pipes in domain_pipes.values())}") + console.print(f" Bundles scanned: {len(mthds_files)}") + console.print(f"\n[dim]Edit {MANIFEST_FILENAME} to set the correct address and configure exports.[/dim]") diff --git a/pipelex/cli/commands/pkg/inspect_cmd.py b/pipelex/cli/commands/pkg/inspect_cmd.py new file mode 100644 index 000000000..89b59aa18 --- /dev/null +++ b/pipelex/cli/commands/pkg/inspect_cmd.py @@ -0,0 +1,113 @@ +from pathlib import Path + +import typer +from rich import box +from rich.table import Table + +from pipelex.core.packages.exceptions import IndexBuildError +from pipelex.core.packages.index.index_builder import build_index_from_cache, build_index_from_project +from pipelex.hub import get_console + + +def do_pkg_inspect(address: str, cache: bool = False) -> None: + """Display detailed information about a single package. + + Args: + address: Package address to inspect. + cache: Look in cache instead of the current project. 
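A quick illustration of the placeholder-address slugging that do_pkg_init applies to the directory name above (the directory name below is a hypothetical example):

# Sketch only: the same replace/lower chain used in init_cmd.py above.
dir_name = "My-Methods Pack".replace("-", "_").replace(" ", "_").lower()
assert dir_name == "my_methods_pack"
print(f"example.com/yourorg/{dir_name}")  # placeholder address, as generated above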
+ """ + console = get_console() + + try: + if cache: + index = build_index_from_cache() + else: + index = build_index_from_project(Path.cwd()) + except IndexBuildError as exc: + console.print(f"[red]Index build error: {exc}[/red]") + raise typer.Exit(code=1) from exc + + if not index.entries: + console.print("[yellow]No packages found.[/yellow]") + raise typer.Exit(code=1) + + entry = index.get_entry(address) + if entry is None: + available = ", ".join(sorted(index.entries.keys())) + console.print(f"[red]Package '{address}' not found.[/red]") + console.print(f"[dim]Available packages: {available}[/dim]") + raise typer.Exit(code=1) + + # Package info table + info_table = Table(title="Package Info", box=box.ROUNDED, show_header=True) + info_table.add_column("Field", style="cyan") + info_table.add_column("Value") + info_table.add_row("Address", entry.address) + if entry.display_name: + info_table.add_row("Display Name", entry.display_name) + info_table.add_row("Version", entry.version) + info_table.add_row("Description", entry.description) + if entry.authors: + info_table.add_row("Authors", ", ".join(entry.authors)) + if entry.license: + info_table.add_row("License", entry.license) + if entry.dependencies: + info_table.add_row("Dependencies", ", ".join(entry.dependencies)) + console.print(info_table) + + # Domains table + if entry.domains: + console.print() + domain_table = Table(title="Domains", box=box.ROUNDED, show_header=True) + domain_table.add_column("Domain Code", style="cyan") + domain_table.add_column("Description") + for domain in entry.domains: + domain_table.add_row(domain.domain_code, domain.description or "[dim]-[/dim]") + console.print(domain_table) + + # Concepts table + if entry.concepts: + console.print() + concept_table = Table(title="Concepts", box=box.ROUNDED, show_header=True) + concept_table.add_column("Concept", style="cyan") + concept_table.add_column("Domain") + concept_table.add_column("Description") + concept_table.add_column("Refines") + concept_table.add_column("Fields") + for concept in entry.concepts: + fields_str = ", ".join(concept.structure_fields) if concept.structure_fields else "[dim]-[/dim]" + concept_table.add_row( + concept.concept_code, + concept.domain_code, + concept.description, + concept.refines or "[dim]-[/dim]", + fields_str, + ) + console.print(concept_table) + + # Pipes table + if entry.pipes: + console.print() + pipe_table = Table(title="Pipe Signatures", box=box.ROUNDED, show_header=True) + pipe_table.add_column("Pipe", style="cyan") + pipe_table.add_column("Type") + pipe_table.add_column("Domain") + pipe_table.add_column("Description") + pipe_table.add_column("Inputs") + pipe_table.add_column("Output") + pipe_table.add_column("Exported") + for pipe in entry.pipes: + inputs_str = ", ".join(f"{key}: {val}" for key, val in pipe.input_specs.items()) if pipe.input_specs else "[dim]-[/dim]" + exported_str = "[green]yes[/green]" if pipe.is_exported else "[dim]no[/dim]" + pipe_table.add_row( + pipe.pipe_code, + pipe.pipe_type, + pipe.domain_code, + pipe.description, + inputs_str, + pipe.output_spec, + exported_str, + ) + console.print(pipe_table) + + console.print() diff --git a/pipelex/cli/commands/pkg/install_cmd.py b/pipelex/cli/commands/pkg/install_cmd.py new file mode 100644 index 000000000..5c886e8ba --- /dev/null +++ b/pipelex/cli/commands/pkg/install_cmd.py @@ -0,0 +1,64 @@ +from pathlib import Path + +import typer + +from pipelex.core.packages.dependency_resolver import resolve_remote_dependency +from pipelex.core.packages.exceptions 
import DependencyResolveError, IntegrityError +from pipelex.core.packages.lock_file import LOCK_FILENAME, LockFileError, parse_lock_file, verify_lock_file +from pipelex.core.packages.manifest import PackageDependency +from pipelex.core.packages.package_cache import is_cached +from pipelex.hub import get_console + + +def do_pkg_install() -> None: + """Install dependencies from methods.lock.""" + console = get_console() + cwd = Path.cwd() + lock_path = cwd / LOCK_FILENAME + + if not lock_path.exists(): + console.print(f"[red]{LOCK_FILENAME} not found in current directory.[/red]") + console.print("Run [bold]pipelex pkg lock[/bold] first to generate a lock file.") + raise typer.Exit(code=1) + + lock_content = lock_path.read_text(encoding="utf-8") + try: + lock_file = parse_lock_file(lock_content) + except LockFileError as exc: + console.print(f"[red]Could not parse {LOCK_FILENAME}: {exc.message}[/red]") + raise typer.Exit(code=1) from exc + + if not lock_file.packages: + console.print("[dim]Nothing to install: lock file is empty.[/dim]") + return + + fetched_count = 0 + cached_count = 0 + + for address, locked in lock_file.packages.items(): + if is_cached(address, locked.version): + cached_count += 1 + continue + + # Fetch missing package by resolving with exact version constraint + dep = PackageDependency( + address=address, + version=locked.version, + alias=address.rsplit("/", maxsplit=1)[-1].replace("-", "_").replace(".", "_"), + ) + try: + resolve_remote_dependency(dep) + except DependencyResolveError as exc: + console.print(f"[red]Failed to fetch '{address}@{locked.version}': {exc.message}[/red]") + raise typer.Exit(code=1) from exc + + fetched_count += 1 + + # Verify integrity + try: + verify_lock_file(lock_file) + except IntegrityError as exc: + console.print(f"[red]Integrity verification failed: {exc.message}[/red]") + raise typer.Exit(code=1) from exc + + console.print(f"[green]Installed {fetched_count} package(s), {cached_count} already cached.[/green]") diff --git a/pipelex/cli/commands/pkg/list_cmd.py b/pipelex/cli/commands/pkg/list_cmd.py new file mode 100644 index 000000000..22334c7b7 --- /dev/null +++ b/pipelex/cli/commands/pkg/list_cmd.py @@ -0,0 +1,77 @@ +from pathlib import Path + +import typer +from rich import box +from rich.table import Table + +from pipelex.core.packages.discovery import MANIFEST_FILENAME, find_package_manifest +from pipelex.core.packages.exceptions import ManifestError +from pipelex.hub import get_console + + +def do_pkg_list() -> None: + """Display the package manifest information. + + Walks up from the current directory to find a METHODS.toml and displays its contents. 
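Worth noting from install_cmd.py above: the fallback alias is derived from the last path segment of the package address. A small sketch with a hypothetical address:

# Sketch only: mirrors the alias derivation in do_pkg_install above.
address = "example.com/yourorg/doc-tools.v2"  # hypothetical address
alias = address.rsplit("/", maxsplit=1)[-1].replace("-", "_").replace(".", "_")
assert alias == "doc_tools_v2"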
+ """ + console = get_console() + cwd = Path.cwd() + + # Create a dummy bundle path to trigger the walk-up search from cwd + dummy_bundle_path = cwd / "dummy.mthds" + try: + manifest = find_package_manifest(dummy_bundle_path) + except ManifestError as exc: + console.print(f"[red]Error reading METHODS.toml: {exc.message}[/red]") + raise typer.Exit(code=1) from exc + + if manifest is None: + console.print(f"[yellow]No {MANIFEST_FILENAME} found in current directory or parent directories.[/yellow]") + console.print("Run [bold]pipelex pkg init[/bold] to create one.") + raise typer.Exit(code=1) + + # Display package info + console.print(f"\n[bold]{MANIFEST_FILENAME}[/bold]\n") + + # Package table + pkg_table = Table(title="Package", box=box.ROUNDED, show_header=True) + pkg_table.add_column("Field", style="cyan") + pkg_table.add_column("Value") + pkg_table.add_row("Address", manifest.address) + if manifest.display_name: + pkg_table.add_row("Display Name", manifest.display_name) + pkg_table.add_row("Version", manifest.version) + pkg_table.add_row("Description", manifest.description) + if manifest.authors: + pkg_table.add_row("Authors", ", ".join(manifest.authors)) + if manifest.license: + pkg_table.add_row("License", manifest.license) + if manifest.mthds_version: + pkg_table.add_row("MTHDS Version", manifest.mthds_version) + console.print(pkg_table) + + # Dependencies table + if manifest.dependencies: + console.print() + deps_table = Table(title="Dependencies", box=box.ROUNDED, show_header=True) + deps_table.add_column("Alias", style="cyan") + deps_table.add_column("Address") + deps_table.add_column("Version") + for dep in manifest.dependencies: + deps_table.add_row(dep.alias, dep.address, dep.version) + console.print(deps_table) + + # Exports table + if manifest.exports: + console.print() + exports_table = Table(title="Exports", box=box.ROUNDED, show_header=True) + exports_table.add_column("Domain", style="cyan") + exports_table.add_column("Pipes") + for domain_export in manifest.exports: + exports_table.add_row( + domain_export.domain_path, + ", ".join(domain_export.pipes), + ) + console.print(exports_table) + + console.print() diff --git a/pipelex/cli/commands/pkg/lock_cmd.py b/pipelex/cli/commands/pkg/lock_cmd.py new file mode 100644 index 000000000..e681945fe --- /dev/null +++ b/pipelex/cli/commands/pkg/lock_cmd.py @@ -0,0 +1,48 @@ +from pathlib import Path + +import typer + +from pipelex.core.packages.dependency_resolver import resolve_all_dependencies +from pipelex.core.packages.discovery import MANIFEST_FILENAME +from pipelex.core.packages.exceptions import DependencyResolveError, ManifestError, TransitiveDependencyError +from pipelex.core.packages.lock_file import LOCK_FILENAME, LockFileError, generate_lock_file, serialize_lock_file +from pipelex.core.packages.manifest_parser import parse_methods_toml +from pipelex.hub import get_console + + +def do_pkg_lock() -> None: + """Resolve dependencies and generate methods.lock.""" + console = get_console() + cwd = Path.cwd() + manifest_path = cwd / MANIFEST_FILENAME + + if not manifest_path.exists(): + console.print(f"[red]{MANIFEST_FILENAME} not found in current directory.[/red]") + console.print("Run [bold]pipelex pkg init[/bold] first to create a manifest.") + raise typer.Exit(code=1) + + content = manifest_path.read_text(encoding="utf-8") + try: + manifest = parse_methods_toml(content) + except ManifestError as exc: + console.print(f"[red]Could not parse {MANIFEST_FILENAME}: {exc.message}[/red]") + raise typer.Exit(code=1) from exc + + try: 
+ resolved = resolve_all_dependencies(manifest, cwd) + except (DependencyResolveError, TransitiveDependencyError) as exc: + console.print(f"[red]Dependency resolution failed: {exc.message}[/red]") + raise typer.Exit(code=1) from exc + + try: + lock = generate_lock_file(manifest, resolved) + except LockFileError as exc: + console.print(f"[red]Lock file generation failed: {exc.message}[/red]") + raise typer.Exit(code=1) from exc + + lock_content = serialize_lock_file(lock) + lock_path = cwd / LOCK_FILENAME + lock_path.write_text(lock_content, encoding="utf-8") + + pkg_count = len(lock.packages) + console.print(f"[green]Wrote {LOCK_FILENAME} with {pkg_count} package(s).[/green]") diff --git a/pipelex/cli/commands/pkg/publish_cmd.py b/pipelex/cli/commands/pkg/publish_cmd.py new file mode 100644 index 000000000..852d0df5f --- /dev/null +++ b/pipelex/cli/commands/pkg/publish_cmd.py @@ -0,0 +1,101 @@ +import subprocess # noqa: S404 +from pathlib import Path + +import typer +from rich import box +from rich.console import Console +from rich.table import Table + +from pipelex.core.packages.exceptions import PublishValidationError +from pipelex.core.packages.publish_validation import PublishValidationResult, validate_for_publish +from pipelex.hub import get_console + + +def do_pkg_publish(tag: bool = False) -> None: + """Validate package readiness for distribution. + + Args: + tag: If True and validation passes, create a local git tag v{version}. + """ + console = get_console() + package_root = Path.cwd() + + try: + result = validate_for_publish(package_root) + except PublishValidationError as exc: + console.print(f"[red]Error: {exc.message}[/red]") + raise typer.Exit(code=1) from exc + + _display_results(console, result) + + errors = [issue for issue in result.issues if issue.level.is_error] + warnings = [issue for issue in result.issues if issue.level.is_warning] + + console.print(f"\n{len(errors)} error(s), {len(warnings)} warning(s)") + + if errors: + console.print("[red]Package is NOT ready for distribution.[/red]") + raise typer.Exit(code=1) + + if tag and result.package_version: + _create_git_tag(console, package_root, result.package_version) + + console.print("[green]Package is ready for distribution.[/green]") + + +def _display_results(console: Console, result: PublishValidationResult) -> None: + """Display validation issues as Rich tables.""" + errors = [issue for issue in result.issues if issue.level.is_error] + warnings = [issue for issue in result.issues if issue.level.is_warning] + + if errors: + error_table = Table(title="Errors", box=box.ROUNDED, show_header=True) + error_table.add_column("Category", style="red") + error_table.add_column("Message", style="red") + error_table.add_column("Suggestion", style="dim") + + for issue in errors: + error_table.add_row( + issue.category, + issue.message, + issue.suggestion or "", + ) + + console.print(error_table) + + if warnings: + warning_table = Table(title="Warnings", box=box.ROUNDED, show_header=True) + warning_table.add_column("Category", style="yellow") + warning_table.add_column("Message", style="yellow") + warning_table.add_column("Suggestion", style="dim") + + for issue in warnings: + warning_table.add_row( + issue.category, + issue.message, + issue.suggestion or "", + ) + + console.print(warning_table) + + +def _create_git_tag(console: Console, package_root: Path, version: str) -> None: + """Create a local git tag from the already-validated package version.""" + version_tag = f"v{version}" + + try: + subprocess.run( # noqa: S603 + 
["git", "tag", version_tag], # noqa: S607 + capture_output=True, + text=True, + check=True, + timeout=10, + cwd=package_root, + ) + console.print(f"[green]Created git tag '{version_tag}'[/green]") + except subprocess.CalledProcessError as exc: + console.print(f"[red]Failed to create git tag: {exc.stderr.strip()}[/red]") + raise typer.Exit(code=1) from exc + except (FileNotFoundError, subprocess.TimeoutExpired) as exc: + console.print("[red]Failed to create git tag: git not available[/red]") + raise typer.Exit(code=1) from exc diff --git a/pipelex/cli/commands/pkg/search_cmd.py b/pipelex/cli/commands/pkg/search_cmd.py new file mode 100644 index 000000000..7cafa5c4f --- /dev/null +++ b/pipelex/cli/commands/pkg/search_cmd.py @@ -0,0 +1,305 @@ +from pathlib import Path + +import typer +from rich import box +from rich.console import Console +from rich.table import Table + +from pipelex.core.concepts.native.concept_native import NativeConceptCode +from pipelex.core.packages.exceptions import GraphBuildError, IndexBuildError +from pipelex.core.packages.graph.graph_builder import build_know_how_graph +from pipelex.core.packages.graph.models import NATIVE_PACKAGE_ADDRESS, ConceptId, PipeNode +from pipelex.core.packages.graph.query_engine import KnowHowQueryEngine +from pipelex.core.packages.index.index_builder import build_index_from_cache, build_index_from_project +from pipelex.core.packages.index.models import ConceptEntry, PackageIndex, PipeSignature +from pipelex.hub import get_console + + +def _matches(query: str, *fields: str | None) -> bool: + """Case-insensitive substring match against any of the provided fields.""" + lower_query = query.lower() + return any(field is not None and lower_query in field.lower() for field in fields) + + +def _search_concepts(index: PackageIndex, query: str, domain_filter: str | None) -> list[tuple[str, ConceptEntry]]: + """Find concepts matching the query, optionally filtered by domain.""" + results: list[tuple[str, ConceptEntry]] = [] + for address, concept in index.all_concepts(): + if domain_filter and concept.domain_code != domain_filter: + continue + if _matches(query, concept.concept_code, concept.description, concept.concept_ref): + results.append((address, concept)) + return results + + +def _search_pipes(index: PackageIndex, query: str, domain_filter: str | None) -> list[tuple[str, PipeSignature]]: + """Find pipes matching the query, optionally filtered by domain.""" + results: list[tuple[str, PipeSignature]] = [] + for address, pipe in index.all_pipes(): + if domain_filter and pipe.domain_code != domain_filter: + continue + if _matches(query, pipe.pipe_code, pipe.description, pipe.output_spec): + results.append((address, pipe)) + return results + + +def _resolve_concept_fuzzy(concept_str: str, index: PackageIndex) -> list[tuple[ConceptId, str]]: + """Fuzzy-resolve a concept string to matching ConceptIds. + + Collects candidates from native concepts and indexed concepts, matches + case-insensitively against concept_code and concept_ref. Exact matches + take priority to prevent 'Text' from ambiguously matching 'TextAndImages'. + + Args: + concept_str: The user-provided concept string (e.g. 
"Text", "WeightedScore") + index: The package index to search + + Returns: + List of (ConceptId, concept_code) tuples for matching concepts + """ + candidates: list[tuple[ConceptId, str]] = [] + lower_str = concept_str.lower() + + # Native concepts + for native_code in NativeConceptCode: + concept_ref = f"native.{native_code}" + concept_id = ConceptId( + package_address=NATIVE_PACKAGE_ADDRESS, + concept_ref=concept_ref, + ) + code_str: str = native_code.value + if lower_str in code_str.lower() or lower_str in concept_ref.lower(): + candidates.append((concept_id, code_str)) + + # Indexed concepts + for address, concept in index.all_concepts(): + concept_id = ConceptId( + package_address=address, + concept_ref=concept.concept_ref, + ) + if lower_str in concept.concept_code.lower() or lower_str in concept.concept_ref.lower(): + candidates.append((concept_id, concept.concept_code)) + + # Exact-match priority: if any candidate's code or ref matches exactly, return only those + exact_matches: list[tuple[ConceptId, str]] = [] + for cid, code in candidates: + if code.lower() == lower_str or cid.concept_ref.lower() == lower_str: + exact_matches.append((cid, code)) + + if exact_matches: + return exact_matches + + return candidates + + +def _display_ambiguous_concepts( + matches: list[tuple[ConceptId, str]], + concept_str: str, + console: Console, +) -> None: + """Display a table of ambiguous concept matches and a hint to refine the query.""" + console.print(f"[yellow]Ambiguous concept '{concept_str}' β€” matches {len(matches)} concepts:[/yellow]") + table = Table(box=box.ROUNDED, show_header=True) + table.add_column("Package", style="cyan") + table.add_column("Concept Code") + table.add_column("Concept Ref") + for cid, code in matches: + table.add_row(cid.package_address, code, cid.concept_ref) + console.print(table) + console.print("[dim]Refine your query to match exactly one concept.[/dim]") + + +def _display_type_search_pipes(pipes: list[PipeNode], title: str, console: Console) -> None: + """Display a Rich table of pipe nodes matching type search results.""" + pipe_table = Table(title=title, box=box.ROUNDED, show_header=True) + pipe_table.add_column("Package", style="cyan") + pipe_table.add_column("Pipe") + pipe_table.add_column("Type") + pipe_table.add_column("Domain") + pipe_table.add_column("Description") + pipe_table.add_column("Exported") + + for pipe_node in pipes: + exported_str = "[green]yes[/green]" if pipe_node.is_exported else "[dim]no[/dim]" + pipe_table.add_row( + pipe_node.package_address, + pipe_node.pipe_code, + pipe_node.pipe_type, + pipe_node.domain_code, + pipe_node.description, + exported_str, + ) + + console.print(pipe_table) + + +def _handle_accepts_search( + concept_str: str, + index: PackageIndex, + engine: KnowHowQueryEngine, + console: Console, + domain_filter: str | None = None, +) -> None: + """Resolve concept fuzzy and find pipes that accept it.""" + matches = _resolve_concept_fuzzy(concept_str, index) + if not matches: + console.print(f"[yellow]No concept matching '{concept_str}' found.[/yellow]") + return + if len(matches) > 1: + _display_ambiguous_concepts(matches, concept_str, console) + raise typer.Exit(code=1) + + concept_id, concept_code = matches[0] + pipes = engine.query_what_can_i_do(concept_id) + if domain_filter is not None: + pipes = [pipe_node for pipe_node in pipes if pipe_node.domain_code == domain_filter] + if not pipes: + console.print(f"[yellow]No pipes accept concept '{concept_code}' ({concept_id.concept_ref}).[/yellow]") + return + 
_display_type_search_pipes(pipes, f"Pipes that accept '{concept_code}'", console) + + +def _handle_produces_search( + concept_str: str, + index: PackageIndex, + engine: KnowHowQueryEngine, + console: Console, + domain_filter: str | None = None, +) -> None: + """Fuzzily resolve the concept and find pipes that produce it.""" + matches = _resolve_concept_fuzzy(concept_str, index) + if not matches: + console.print(f"[yellow]No concept matching '{concept_str}' found.[/yellow]") + return + if len(matches) > 1: + _display_ambiguous_concepts(matches, concept_str, console) + raise typer.Exit(code=1) + + concept_id, concept_code = matches[0] + pipes = engine.query_what_produces(concept_id) + if domain_filter is not None: + pipes = [pipe_node for pipe_node in pipes if pipe_node.domain_code == domain_filter] + if not pipes: + console.print(f"[yellow]No pipes produce concept '{concept_code}' ({concept_id.concept_ref}).[/yellow]") + return + _display_type_search_pipes(pipes, f"Pipes that produce '{concept_code}'", console) + + +def _do_type_search( + index: PackageIndex, + accepts: str | None, + produces: str | None, + console: Console, + domain_filter: str | None = None, +) -> None: + """Build the know-how graph and delegate to accepts/produces search handlers.""" + try: + graph = build_know_how_graph(index) + except GraphBuildError as exc: + console.print(f"[red]Graph build error: {exc}[/red]") + raise typer.Exit(code=1) from exc + + engine = KnowHowQueryEngine(graph) + + if accepts is not None: + _handle_accepts_search(accepts, index, engine, console, domain_filter=domain_filter) + if produces is not None: + _handle_produces_search(produces, index, engine, console, domain_filter=domain_filter) + + +def do_pkg_search( + query: str | None = None, + domain: str | None = None, + concept_only: bool = False, + pipe_only: bool = False, + cache: bool = False, + accepts: str | None = None, + produces: str | None = None, +) -> None: + """Search the package index for concepts and pipes matching a query. + + Args: + query: Search term (case-insensitive substring match). + domain: Optional domain filter. + concept_only: Show only matching concepts. + pipe_only: Show only matching pipes. + cache: Search cached packages instead of the current project. + accepts: Find pipes that accept this concept (type-compatible search). + produces: Find pipes that produce this concept (type-compatible search). 
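The exact-match priority in _resolve_concept_fuzzy above is what keeps short queries usable. A reduced sketch over plain strings (the real code compares ConceptId candidates, and the names here are hypothetical):

# Sketch only: exact matches win over substring matches, as in
# _resolve_concept_fuzzy above.
candidates = ["Text", "TextAndImages", "PlainText"]
query = "text"
exact = [code for code in candidates if code.lower() == query]
matches = exact if exact else [code for code in candidates if query in code.lower()]
assert matches == ["Text"]  # without the priority rule, all three would match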
+ """ + console = get_console() + + if query is None and accepts is None and produces is None: + console.print("[red]Provide a search query or use --accepts/--produces for type search.[/red]") + raise typer.Exit(code=1) + + try: + if cache: + index = build_index_from_cache() + else: + index = build_index_from_project(Path.cwd()) + except IndexBuildError as exc: + console.print(f"[red]Index build error: {exc}[/red]") + raise typer.Exit(code=1) from exc + + if not index.entries: + console.print("[yellow]No packages found to search.[/yellow]") + raise typer.Exit(code=1) + + if accepts is not None or produces is not None: + _do_type_search(index, accepts, produces, console, domain_filter=domain) + return + + assert query is not None + + both_or_neither = concept_only == pipe_only + show_concepts = both_or_neither or concept_only + show_pipes = both_or_neither or pipe_only + + matching_concepts = _search_concepts(index, query, domain) if show_concepts else [] + matching_pipes = _search_pipes(index, query, domain) if show_pipes else [] + + if not matching_concepts and not matching_pipes: + console.print(f"[yellow]No results matching '{query}'.[/yellow]") + return + + if matching_concepts: + concept_table = Table(title="Matching Concepts", box=box.ROUNDED, show_header=True) + concept_table.add_column("Package", style="cyan") + concept_table.add_column("Concept") + concept_table.add_column("Domain") + concept_table.add_column("Description") + concept_table.add_column("Refines") + + for address, concept in matching_concepts: + concept_table.add_row( + address, + concept.concept_code, + concept.domain_code, + concept.description, + concept.refines or "[dim]-[/dim]", + ) + + console.print(concept_table) + + if matching_pipes: + pipe_table = Table(title="Matching Pipes", box=box.ROUNDED, show_header=True) + pipe_table.add_column("Package", style="cyan") + pipe_table.add_column("Pipe") + pipe_table.add_column("Type") + pipe_table.add_column("Domain") + pipe_table.add_column("Description") + pipe_table.add_column("Exported") + + for address, pipe in matching_pipes: + exported_str = "[green]yes[/green]" if pipe.is_exported else "[dim]no[/dim]" + pipe_table.add_row( + address, + pipe.pipe_code, + pipe.pipe_type, + pipe.domain_code, + pipe.description, + exported_str, + ) + + console.print(pipe_table) diff --git a/pipelex/cli/commands/pkg/update_cmd.py b/pipelex/cli/commands/pkg/update_cmd.py new file mode 100644 index 000000000..3cad5a12d --- /dev/null +++ b/pipelex/cli/commands/pkg/update_cmd.py @@ -0,0 +1,110 @@ +from pathlib import Path + +import typer +from rich.console import Console + +from pipelex.core.packages.dependency_resolver import resolve_all_dependencies +from pipelex.core.packages.discovery import MANIFEST_FILENAME +from pipelex.core.packages.exceptions import DependencyResolveError, ManifestError, TransitiveDependencyError +from pipelex.core.packages.lock_file import ( + LOCK_FILENAME, + LockFile, + LockFileError, + generate_lock_file, + parse_lock_file, + serialize_lock_file, +) +from pipelex.core.packages.manifest_parser import parse_methods_toml +from pipelex.hub import get_console + + +def _display_lock_diff(console: Console, old_lock: LockFile, new_lock: LockFile) -> None: + """Display differences between an old and new lock file. + + Args: + console: Rich console for output. + old_lock: The previous lock file. + new_lock: The freshly generated lock file. 
+ """ + old_addresses = set(old_lock.packages) + new_addresses = set(new_lock.packages) + + added = new_addresses - old_addresses + removed = old_addresses - new_addresses + common = old_addresses & new_addresses + + updated: list[str] = [] + for address in sorted(common): + old_ver = old_lock.packages[address].version + new_ver = new_lock.packages[address].version + if old_ver != new_ver: + updated.append(f"{address}: {old_ver} -> {new_ver}") + + if not added and not removed and not updated: + console.print("[dim]No changes β€” lock file is up to date.[/dim]") + return + + for address in sorted(added): + version = new_lock.packages[address].version + console.print(f" [green]+ {address}@{version}[/green]") + + for address in sorted(removed): + version = old_lock.packages[address].version + console.print(f" [red]- {address}@{version}[/red]") + + for line in updated: + console.print(f" [yellow]{line}[/yellow]") + + +def do_pkg_update() -> None: + """Re-resolve dependencies and update methods.lock.""" + console = get_console() + cwd = Path.cwd() + manifest_path = cwd / MANIFEST_FILENAME + + if not manifest_path.exists(): + console.print(f"[red]{MANIFEST_FILENAME} not found in current directory.[/red]") + console.print("Run [bold]pipelex pkg init[/bold] first to create a manifest.") + raise typer.Exit(code=1) + + content = manifest_path.read_text(encoding="utf-8") + try: + manifest = parse_methods_toml(content) + except ManifestError as exc: + console.print(f"[red]Could not parse {MANIFEST_FILENAME}: {exc.message}[/red]") + raise typer.Exit(code=1) from exc + + # Read existing lock for diff comparison + lock_path = cwd / LOCK_FILENAME + old_lock: LockFile | None = None + if lock_path.exists(): + try: + old_lock = parse_lock_file(lock_path.read_text(encoding="utf-8")) + except LockFileError: + pass # Ignore unparseable old lock + + # Fresh resolve (ignoring existing lock) + try: + resolved = resolve_all_dependencies(manifest, cwd) + except (DependencyResolveError, TransitiveDependencyError) as exc: + console.print(f"[red]Dependency resolution failed: {exc.message}[/red]") + raise typer.Exit(code=1) from exc + + try: + new_lock = generate_lock_file(manifest, resolved) + except LockFileError as exc: + console.print(f"[red]Lock file generation failed: {exc.message}[/red]") + raise typer.Exit(code=1) from exc + + # Write lock file + lock_content = serialize_lock_file(new_lock) + lock_path.write_text(lock_content, encoding="utf-8") + + pkg_count = len(new_lock.packages) + console.print(f"[green]Wrote {LOCK_FILENAME} with {pkg_count} package(s).[/green]") + + # Display diff + if old_lock is not None: + _display_lock_diff(console, old_lock, new_lock) + else: + console.print("[dim]No previous lock file β€” created fresh.[/dim]") diff --git a/pipelex/cli/commands/run_cmd.py b/pipelex/cli/commands/run_cmd.py index 751c4c28e..fa8490676 100644 --- a/pipelex/cli/commands/run_cmd.py +++ b/pipelex/cli/commands/run_cmd.py @@ -19,8 +19,8 @@ handle_model_choice_error, ) from pipelex.config import get_config -from pipelex.core.interpreter.exceptions import PipelexInterpreterError, PLXDecodeError -from pipelex.core.interpreter.helpers import is_pipelex_file +from pipelex.core.interpreter.exceptions import MthdsDecodeError, PipelexInterpreterError +from pipelex.core.interpreter.helpers import MTHDS_EXTENSION, is_pipelex_file from pipelex.core.interpreter.interpreter import PipelexInterpreter from pipelex.core.pipes.exceptions import PipeOperatorModelChoiceError from pipelex.core.stuffs.stuff_viewer import 
render_stuff_viewer @@ -30,7 +30,7 @@ from pipelex.pipe_run.pipe_run_mode import PipeRunMode from pipelex.pipelex import Pipelex from pipelex.pipeline.exceptions import PipelineExecutionError -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner from pipelex.system.runtime import IntegrationMode from pipelex.system.telemetry.events import EventProperty from pipelex.tools.misc.file_utils import get_incremental_directory_path @@ -43,15 +43,15 @@ def run_cmd( target: Annotated[ str | None, - typer.Argument(help="Pipe code, bundle file path (.plx), or pipeline directory (auto-detected)"), + typer.Argument(help="Pipe code, bundle file path (.mthds), or pipeline directory (auto-detected)"), ] = None, pipe: Annotated[ str | None, - typer.Option("--pipe", help="Pipe code to run, can be omitted if you specify a bundle (.plx) that declares a main pipe"), + typer.Option("--pipe", help="Pipe code to run, can be omitted if you specify a bundle (.mthds) that declares a main pipe"), ] = None, bundle: Annotated[ str | None, - typer.Option("--bundle", help="Bundle file path (.plx) - runs its main_pipe unless you specify a pipe code"), + typer.Option("--bundle", help="Bundle file path (.mthds) - runs its main_pipe unless you specify a pipe code"), ] = None, inputs: Annotated[ str | None, @@ -101,20 +101,20 @@ def run_cmd( ] = False, library_dir: Annotated[ list[str] | None, - typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.plx files). Can be specified multiple times."), + typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.mthds files). Can be specified multiple times."), ] = None, ) -> None: """Execute a pipeline from a specific bundle file (or not), specifying its pipe code or not. If the bundle is provided, it will run its main pipe unless you specify a pipe code. If the pipe code is provided, you don't need to provide a bundle file if it's already part of the imported packages. - If a directory is provided, it auto-detects bundle.plx and inputs.json inside it. + If a directory is provided, it auto-detects bundle.mthds and inputs.json inside it. 
Examples: pipelex run my_pipe - pipelex run --bundle my_bundle.plx - pipelex run --bundle my_bundle.plx --pipe my_pipe + pipelex run --bundle my_bundle.mthds + pipelex run --bundle my_bundle.mthds --pipe my_pipe pipelex run --pipe my_pipe --inputs data.json - pipelex run my_bundle.plx --inputs data.json + pipelex run my_bundle.mthds --inputs data.json pipelex run pipeline_01/ pipelex run pipeline_01/ --pipe my_pipe pipelex run my_pipe --working-memory-path results.json --no-pretty-print @@ -158,30 +158,30 @@ def run_cmd( ) raise typer.Exit(1) - # Find .plx: try default name first, then fall back to single .plx + # Find .mthds: try default name first, then fall back to single .mthds bundle_file = target_path / DEFAULT_BUNDLE_FILE_NAME if bundle_file.is_file(): bundle_path = str(bundle_file) else: - plx_files = list(target_path.glob("*.plx")) - if len(plx_files) == 0: + mthds_files = list(target_path.glob(f"*{MTHDS_EXTENSION}")) + if len(mthds_files) == 0: typer.secho( - f"Failed to run: no .plx bundle file found in directory '{target}'", + f"Failed to run: no .mthds bundle file found in directory '{target}'", fg=typer.colors.RED, err=True, ) raise typer.Exit(1) - if len(plx_files) > 1: - plx_names = ", ".join(plx_file.name for plx_file in plx_files) + if len(mthds_files) > 1: + mthds_names = ", ".join(mthds_file.name for mthds_file in mthds_files) typer.secho( - f"Failed to run: multiple .plx files found in '{target}' ({plx_names}) " + f"Failed to run: multiple .mthds files found in '{target}' ({mthds_names}) " f"and no '{DEFAULT_BUNDLE_FILE_NAME}'. " - f"Pass the .plx file directly, e.g.: pipelex run {target_path / plx_files[0].name}", + f"Pass the .mthds file directly, e.g.: pipelex run {target_path / mthds_files[0].name}", fg=typer.colors.RED, err=True, ) raise typer.Exit(1) - bundle_path = str(plx_files[0]) + bundle_path = str(mthds_files[0]) # Auto-detect inputs if --inputs not explicitly provided inputs_file = target_path / DEFAULT_INPUTS_FILE_NAME @@ -207,7 +207,7 @@ def run_cmd( bundle_path = target if bundle: typer.secho( - "Failed to run: cannot use option --bundle if you're already passing a bundle file (.plx) as positional argument", + "Failed to run: cannot use option --bundle if you're already passing a bundle file (.mthds) as positional argument", fg=typer.colors.RED, err=True, ) @@ -236,14 +236,14 @@ def run_cmd( async def run_pipeline(pipe_code: str | None = None, bundle_path: str | None = None): source_description: str - plx_content: str | None = None + mthds_content: str | None = None if bundle_path: try: - plx_content = Path(bundle_path).read_text(encoding="utf-8") + mthds_content = Path(bundle_path).read_text(encoding="utf-8") # Use lightweight parsing to extract main_pipe without full validation # Full validation happens later during execute_pipeline if not pipe_code: - bundle_blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(plx_content=plx_content) + bundle_blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(mthds_content=mthds_content) main_pipe_code = bundle_blueprint.main_pipe if not main_pipe_code: msg = ( @@ -259,7 +259,7 @@ async def run_pipeline(pipe_code: str | None = None, bundle_path: str | None = N except FileNotFoundError as exc: typer.secho(f"Failed to load bundle '{bundle_path}': {exc}", fg=typer.colors.RED, err=True) raise typer.Exit(1) from exc - except (PipelexInterpreterError, PLXDecodeError) as exc: + except (PipelexInterpreterError, MthdsDecodeError) as exc: typer.secho(f"Failed to parse bundle '{bundle_path}': {exc}", 
fg=typer.colors.RED, err=True) raise typer.Exit(1) from exc elif pipe_code: @@ -299,15 +299,18 @@ async def run_pipeline(pipe_code: str | None = None, bundle_path: str | None = N ) try: - pipe_output = await execute_pipeline( - pipe_code=pipe_code, - plx_content=plx_content, + runner = PipelexRunner( bundle_uri=bundle_path, - inputs=pipeline_inputs, pipe_run_mode=pipe_run_mode, execution_config=execution_config, library_dirs=library_dir, ) + response = await runner.execute_pipeline( + pipe_code=pipe_code, + mthds_content=mthds_content, + inputs=pipeline_inputs, + ) + pipe_output = response.pipe_output except PipelineExecutionError as exc: typer.secho(f"Failed to execute pipeline '{exc.pipe_code}': {exc}", fg=typer.colors.RED, err=True) raise typer.Exit(1) from exc diff --git a/pipelex/cli/commands/show_cmd.py b/pipelex/cli/commands/show_cmd.py index 750f5cb1a..b6a04bd47 100644 --- a/pipelex/cli/commands/show_cmd.py +++ b/pipelex/cli/commands/show_cmd.py @@ -29,7 +29,6 @@ ) from pipelex.pipelex import Pipelex from pipelex.system.configuration.config_loader import config_manager -from pipelex.system.configuration.configs import ConfigPaths from pipelex.system.runtime import IntegrationMode from pipelex.system.telemetry.events import EventName, EventProperty from pipelex.tools.misc.package_utils import get_package_version @@ -77,8 +76,8 @@ def do_show_backends(show_all: bool = False) -> None: backend_library = InferenceBackendLibrary() backend_library.load( secrets_provider=secrets_provider, - backends_library_path=ConfigPaths.BACKENDS_FILE_PATH, - backends_dir_path=ConfigPaths.BACKENDS_DIR_PATH, + backends_library_path=config_manager.backends_file_path, + backends_dir_path=config_manager.backends_dir_path, include_disabled=True, ) else: @@ -201,7 +200,7 @@ def show_pipe_cmd( typer.Option( "--library-dir", "-L", - help="Directory to search for pipe definitions (.plx files). Can be specified multiple times.", + help="Directory to search for pipe definitions (.mthds files). Can be specified multiple times.", ), ] = None, ) -> None: diff --git a/pipelex/cli/commands/validate_cmd.py b/pipelex/cli/commands/validate_cmd.py index f7701216e..b5ff7770c 100644 --- a/pipelex/cli/commands/validate_cmd.py +++ b/pipelex/cli/commands/validate_cmd.py @@ -77,7 +77,7 @@ def do_validate_all_libraries_and_dry_run( def validate_cmd( target: Annotated[ str | None, - typer.Argument(help="Pipe code or bundle file path (auto-detected based on .plx extension)"), + typer.Argument(help="Pipe code or bundle file path (auto-detected based on .mthds extension)"), ] = None, pipe: Annotated[ str | None, @@ -87,7 +87,7 @@ def validate_cmd( str | None, typer.Option( "--bundle", - help="Bundle file path (.plx) - validates all pipes in the bundle", + help="Bundle file path (.mthds) - validates all pipes in the bundle", ), ] = None, validate_all: Annotated[ @@ -99,7 +99,7 @@ def validate_cmd( typer.Option( "--library-dir", "-L", - help="Directory to search for pipe definitions (.plx files). Can be specified multiple times.", + help="Directory to search for pipe definitions (.mthds files). 
Can be specified multiple times."), ] = None, ) -> None: @@ -107,9 +107,9 @@ Examples: pipelex validate my_pipe - pipelex validate my_bundle.plx - pipelex validate --bundle my_bundle.plx - pipelex validate --bundle my_bundle.plx --pipe my_pipe + pipelex validate my_bundle.mthds + pipelex validate --bundle my_bundle.mthds + pipelex validate --bundle my_bundle.mthds --pipe my_pipe pipelex validate --all """ if validate_all: @@ -149,7 +149,7 @@ bundle_path = target_path if bundle: typer.secho( - "Failed to validate: cannot use option --bundle if you're already passing a bundle file (.plx) as positional argument", + "Failed to validate: cannot use option --bundle if you're already passing a bundle file (.mthds) as positional argument", fg=typer.colors.RED, err=True, ) @@ -187,7 +187,7 @@ async def validate_pipe( ): if bundle_path: try: - await validate_bundle(plx_file_path=bundle_path, library_dirs=library_dirs) + await validate_bundle(mthds_file_path=bundle_path, library_dirs=library_dirs) typer.secho( f"✅ Successfully validated bundle '{bundle_path}'", fg=typer.colors.GREEN, diff --git a/pipelex/cli/dev_cli/_dev_cli.py b/pipelex/cli/dev_cli/_dev_cli.py index ebc11c030..8e3634c5a 100644 --- a/pipelex/cli/dev_cli/_dev_cli.py +++ b/pipelex/cli/dev_cli/_dev_cli.py @@ -1,6 +1,7 @@ """Main entry point for the internal development CLI.""" import sys +from pathlib import Path from typing import Annotated import typer @@ -13,6 +14,7 @@ from pipelex.cli.dev_cli.commands.check_gateway_models_cmd import check_gateway_models_cmd from pipelex.cli.dev_cli.commands.check_rules_sync_cmd import check_rules_sync_cmd from pipelex.cli.dev_cli.commands.check_urls_cmd import DEFAULT_TIMEOUT, check_urls_cmd +from pipelex.cli.dev_cli.commands.generate_mthds_schema_cmd import generate_mthds_schema_cmd from pipelex.cli.dev_cli.commands.kit_cmd import kit_app from pipelex.cli.dev_cli.commands.preprocess_test_models_cmd import preprocess_test_models_cmd from pipelex.cli.dev_cli.commands.sync_main_config_cmd import SyncTarget, sync_main_config_cmd @@ -32,6 +34,7 @@ def list_commands(self, ctx: Context) -> list[str]: "check-gateway-models", "check-rules", "check-urls", + "generate-mthds-schema", "kit", "preprocess-test-models", "sync-main-config", @@ -137,6 +140,24 @@ def check_urls_command( sys.exit(1) + +@app.command(name="generate-mthds-schema", help="Generate JSON Schema for .mthds files (for Taplo validation)") +def generate_mthds_schema_command( + output: Annotated[str | None, typer.Option("--output", "-o", help="Custom output path for the schema file")] = None, + quiet: Annotated[bool, typer.Option("--quiet", "-q", help="Output only a single validation line")] = False, +) -> None: + """Generate a Taplo-compatible JSON Schema from MTHDS blueprint classes.""" + try: + output_path = Path(output) if output else None + generate_mthds_schema_cmd(output=output_path, quiet=quiet) + except Exception: + console = get_console() + console.print() + console.print("[bold red]Unexpected error occurred[/bold red]") + console.print() + console.print(Traceback()) + sys.exit(1) + + @app.command(name="check-gateway-models", help="Verify that gateway models reference is up-to-date") def check_gateway_models_command( show_diff: Annotated[bool, typer.Option("--show-diff/--no-diff", help="Show differences if found")] = True, diff --git a/pipelex/cli/dev_cli/commands/generate_mthds_schema_cmd.py b/pipelex/cli/dev_cli/commands/generate_mthds_schema_cmd.py new file mode 100644 index 000000000..d3b93d6a7 
--- /dev/null +++ b/pipelex/cli/dev_cli/commands/generate_mthds_schema_cmd.py @@ -0,0 +1,65 @@ +"""Command to generate JSON Schema for .mthds files.""" + +from __future__ import annotations + +import json +import sys +from pathlib import Path + +from rich.panel import Panel + +from pipelex.hub import get_console +from pipelex.language.mthds_schema_generator import generate_mthds_schema + +# Path to the generated schema file, alongside mthds_factory.py and mthds_config.py +MTHDS_SCHEMA_PATH = Path("pipelex/language/mthds_schema.json") + + +def generate_mthds_schema_cmd(output: Path | None = None, quiet: bool = False) -> None: + """Generate a Taplo-compatible JSON Schema for .mthds files. + + Generates the schema from PipelexBundleBlueprint and writes it as JSON. + The schema enables IDE validation and autocompletion in the vscode-pipelex extension. + + Args: + output: Custom output path. Defaults to pipelex/language/mthds_schema.json. + quiet: If True, output only a single validation line. + """ + console = get_console() + output_path = output or MTHDS_SCHEMA_PATH + + if not quiet: + console.print() + console.print("[bold]Generating MTHDS JSON Schema...[/bold]") + console.print() + + try: + schema = generate_mthds_schema() + except Exception: + if quiet: + console.print("[red]\u2717 MTHDS schema generation: FAILED[/red]") + else: + console.print("[bold red]\u2717 Failed to generate MTHDS schema[/bold red]") + sys.exit(1) + + # Ensure parent directory exists + output_path.parent.mkdir(parents=True, exist_ok=True) + + # Write the schema file + schema_json = json.dumps(schema, indent=2, ensure_ascii=False) + "\n" + output_path.write_text(schema_json, encoding="utf-8") + + # Count definitions for reporting + definition_count = len(schema.get("definitions", {})) + + if quiet: + console.print(f"[green]\u2713 MTHDS schema generation: PASSED[/green] ({definition_count} definitions)") + else: + success_panel = Panel( + f"[green]\u2713[/green] Schema generated successfully!\n\n[dim]Output: {output_path}[/dim]\n[dim]Definitions: {definition_count}[/dim]", + title="[bold green]MTHDS Schema Generation: PASSED[/bold green]", + border_style="green", + padding=(1, 2), + ) + console.print(success_panel) + console.print() diff --git a/pipelex/cli/dev_cli/commands/preprocess_test_models_cmd.py b/pipelex/cli/dev_cli/commands/preprocess_test_models_cmd.py index 4766cb5de..2fae6688e 100644 --- a/pipelex/cli/dev_cli/commands/preprocess_test_models_cmd.py +++ b/pipelex/cli/dev_cli/commands/preprocess_test_models_cmd.py @@ -13,6 +13,7 @@ from rich.table import Table from pipelex.hub import get_console +from pipelex.system.configuration.config_loader import config_manager from pipelex.system.configuration.configs import ConfigPaths from pipelex.system.pipelex_service.exceptions import RemoteConfigFetchError, RemoteConfigValidationError from pipelex.system.pipelex_service.remote_config_fetcher import RemoteConfigFetcher @@ -156,7 +157,7 @@ def _collect_all_model_availability() -> dict[str, Any]: "text_extractor": {}, } - backends_dir = Path(ConfigPaths.BACKENDS_DIR_PATH) + backends_dir = Path(config_manager.backends_dir_path) # Process each backend TOML file for backend_file in sorted(backends_dir.glob("*.toml")): @@ -603,7 +604,7 @@ def preprocess_test_models_cmd( console.print() # Collect model availability - backends_dir = Path(ConfigPaths.BACKENDS_DIR_PATH) + backends_dir = Path(config_manager.backends_dir_path) if not backends_dir.exists(): if quiet: console.print(f"[red]✗ Preprocessing failed:[/red] Backends 
directory not found: {backends_dir}") diff --git a/pipelex/cli/error_handlers.py b/pipelex/cli/error_handlers.py index 82cb8961b..cc8cf3787 100644 --- a/pipelex/cli/error_handlers.py +++ b/pipelex/cli/error_handlers.py @@ -235,7 +235,7 @@ def handle_build_validation_failure(exc: ValidateBundleError) -> NoReturn: # Display build-specific tips console.print( "[bold green]💡 Tip:[/bold green] Try rephrasing your prompt or simplifying the pipeline requirements. " - "Breaking complex workflows into smaller steps can also help." + "Breaking complex methods into smaller steps can also help." ) console.print(f"[dim]Learn more: {URLs.documentation}[/dim]") console.print(f"[dim]Join our Discord for help: {URLs.discord}[/dim]\n") diff --git a/pipelex/client/api_serializer.py b/pipelex/client/api_serializer.py deleted file mode 100644 index db173f9b2..000000000 --- a/pipelex/client/api_serializer.py +++ /dev/null @@ -1,36 +0,0 @@ -from typing import Any - -from pipelex.core.memory.working_memory import WorkingMemory -from pipelex.tools.misc.json_utils import clean_json_content - - -class ApiSerializer: - """Handles API-specific serialization with datetime formatting and cleanup.""" - - @classmethod - def serialize_working_memory_for_api(cls, working_memory: WorkingMemory | None = None) -> dict[str, dict[str, Any]]: - """Convert WorkingMemory to API-ready format with proper datetime handling. - - Args: - working_memory: The WorkingMemory to serialize - - Returns: - PipelineInputs ready for API transmission with datetime strings and no __class__/__module__. - Returns plain dicts with {"concept": str, "content": dict | list} structure for JSON serialization. - - """ - pipeline_inputs: dict[str, dict[str, Any]] = {} - if working_memory is None: - return pipeline_inputs - - for stuff_name, stuff in working_memory.root.items(): - content_dict = stuff.content.model_dump(serialize_as_any=True) - clean_content = clean_json_content(content_dict) - - # Create plain dict instead of DictStuff instance for JSON serialization - pipeline_inputs[stuff_name] = { - "concept": stuff.concept.code, - "content": clean_content, - } - - return pipeline_inputs diff --git a/pipelex/client/client.py b/pipelex/client/client.py deleted file mode 100644 index 7277114c3..000000000 --- a/pipelex/client/client.py +++ /dev/null @@ -1,170 +0,0 @@ -from typing import Any - -import httpx -from typing_extensions import override - -from pipelex.client.exceptions import ClientAuthenticationError -from pipelex.client.pipeline_request_factory import PipelineRequestFactory -from pipelex.client.pipeline_response_factory import PipelineResponseFactory -from pipelex.client.protocol import PipelexProtocol, PipelineInputs, PipelineRequest, PipelineRequestError, PipelineResponse -from pipelex.core.memory.working_memory import WorkingMemory -from pipelex.core.memory.working_memory_factory import WorkingMemoryFactory -from pipelex.core.pipes.variable_multiplicity import VariableMultiplicity -from pipelex.system.environment import get_required_env - - -class PipelexClient(PipelexProtocol): - """A client for interacting with Pipelex pipelines through the API. - - This client provides a user-friendly interface for executing pipelines through - the remote API. - - Args: - api_token: The API token to use for authentication. If not provided, it will be loaded from the PIPELEX_API_KEY environment variable. - If the environment variable is not set, an error will be raised. 
- - """ - - def __init__( - self, - api_token: str | None = None, - api_base_url: str | None = None, - ): - self.api_token = api_token or get_required_env("PIPELEX_API_KEY") - - if not self.api_token: - msg = "API token is required for API execution" - raise ClientAuthenticationError(msg) - - self.api_base_url = api_base_url or get_required_env("PIPELEX_API_BASE_URL") - if not self.api_base_url: - msg = "API base URL is required for API execution" - raise ClientAuthenticationError(msg) - - self.client: httpx.AsyncClient | None = None - - def start_client(self) -> "PipelexClient": - """Initialize the HTTP client for API calls.""" - self.client = httpx.AsyncClient(base_url=self.api_base_url, headers={"Authorization": f"Bearer {self.api_token}"}) - return self - - async def close(self): - """Close the HTTP client.""" - if self.client: - await self.client.aclose() - self.client = None - - async def _make_api_call(self, endpoint: str, request: str | None = None) -> dict[str, Any]: - """Make an API call to the Pipelex server. - - Args: - endpoint: The API endpoint to call, relative to the base URL - request: A JSON-formatted string to send as the request body, or None if no body is needed - Returns: - dict[str, Any]: The JSON-decoded response from the server - Raises: - httpx.HTTPError: If the request fails or returns a non-200 status code - - """ - if not self.client: - self.start_client() - assert self.client is not None - - # Convert JSON string to UTF-8 bytes if not None - content = request.encode("utf-8") if request is not None else None - response = await self.client.post(f"/{endpoint}", content=content, headers={"Content-Type": "application/json"}, timeout=1200) - response.raise_for_status() - response_data: dict[str, Any] = response.json() - return response_data - - @override - async def execute_pipeline( - self, - pipe_code: str | None = None, - plx_content: str | None = None, - inputs: PipelineInputs | WorkingMemory | None = None, - output_name: str | None = None, - output_multiplicity: VariableMultiplicity | None = None, - dynamic_output_concept_code: str | None = None, - ) -> PipelineResponse: - """Execute a pipeline synchronously and wait for its completion. - - Args: - pipe_code: The code identifying the pipeline to execute - plx_content: Content of the pipeline bundle to execute - inputs: Inputs passed to the pipeline - output_name: Name of the output slot to write to - output_multiplicity: Output multiplicity setting - dynamic_output_concept_code: Override for the dynamic output concept code - - Returns: - Complete execution results including pipeline state and output - """ - if not pipe_code and not plx_content: - msg = "Either pipe_code or plx_content must be provided to the API execute_pipeline." 
- raise PipelineRequestError(message=msg) - - working_memory: WorkingMemory | None = None - pipeline_request: PipelineRequest | None = None - if inputs is not None: - if isinstance(inputs, WorkingMemory): - working_memory = inputs - else: - working_memory = WorkingMemoryFactory.make_from_pipeline_inputs(pipeline_inputs=inputs) - - pipeline_request = PipelineRequestFactory.make_from_working_memory( - pipe_code=pipe_code, - plx_content=plx_content, - working_memory=working_memory, - output_name=output_name, - output_multiplicity=output_multiplicity, - dynamic_output_concept_code=dynamic_output_concept_code, - ) - response = await self._make_api_call("v1/pipeline/execute", request=pipeline_request.model_dump_json()) - return PipelineResponseFactory.make_from_api_response(response) - - @override - async def start_pipeline( - self, - pipe_code: str | None = None, - plx_content: str | None = None, - inputs: PipelineInputs | WorkingMemory | None = None, - output_name: str | None = None, - output_multiplicity: VariableMultiplicity | None = None, - dynamic_output_concept_code: str | None = None, - ) -> PipelineResponse: - """Start a pipeline execution asynchronously without waiting for completion. - - Args: - pipe_code: The code identifying the pipeline to execute - plx_content: Content of the pipeline bundle to execute - inputs: Inputs passed to the pipeline - output_name: Name of the output slot to write to - output_multiplicity: Output multiplicity setting - dynamic_output_concept_code: Override for the dynamic output concept code - - Returns: - Initial response with pipeline_run_id and created_at timestamp - """ - if not pipe_code and not plx_content: - msg = "Either pipe_code or plx_content must be provided to the API start_pipeline." - raise PipelineRequestError(message=msg) - - working_memory: WorkingMemory | None = None - pipeline_request: PipelineRequest | None = None - if inputs is not None: - if isinstance(inputs, WorkingMemory): - working_memory = inputs - else: - working_memory = WorkingMemoryFactory.make_from_pipeline_inputs(pipeline_inputs=inputs) - - pipeline_request = PipelineRequestFactory.make_from_working_memory( - pipe_code=pipe_code, - plx_content=plx_content, - working_memory=working_memory, - output_name=output_name, - output_multiplicity=output_multiplicity, - dynamic_output_concept_code=dynamic_output_concept_code, - ) - response = await self._make_api_call("v1/pipeline/start", request=pipeline_request.model_dump_json()) - return PipelineResponseFactory.make_from_api_response(response) diff --git a/pipelex/client/exceptions.py b/pipelex/client/exceptions.py deleted file mode 100644 index 20128d15d..000000000 --- a/pipelex/client/exceptions.py +++ /dev/null @@ -1,5 +0,0 @@ -from pipelex.base_exceptions import PipelexError - - -class ClientAuthenticationError(PipelexError): - pass diff --git a/pipelex/client/pipeline_request_factory.py b/pipelex/client/pipeline_request_factory.py deleted file mode 100644 index 29f134944..000000000 --- a/pipelex/client/pipeline_request_factory.py +++ /dev/null @@ -1,63 +0,0 @@ -from typing import Any, cast - -from pipelex.client.api_serializer import ApiSerializer -from pipelex.client.protocol import PipelineInputs, PipelineRequest -from pipelex.core.memory.working_memory import WorkingMemory -from pipelex.core.pipes.variable_multiplicity import VariableMultiplicity - - -class PipelineRequestFactory: - """Factory class for creating PipelineRequest objects from WorkingMemory.""" - - @staticmethod - def make_from_working_memory( - pipe_code: 
str | None, - plx_content: str | None, - working_memory: WorkingMemory | None = None, - output_name: str | None = None, - output_multiplicity: VariableMultiplicity | None = None, - dynamic_output_concept_code: str | None = None, - ) -> PipelineRequest: - """Create a PipelineRequest from a WorkingMemory object. - - Args: - pipe_code: The code identifying the pipeline to execute - plx_content: Content of the pipeline bundle to execute - working_memory: The WorkingMemory to convert - output_name: Name of the output slot to write to - output_multiplicity: Output multiplicity setting - dynamic_output_concept_code: Override for the dynamic output concept code - plx_content: Content of the pipeline bundle to execute - Returns: - PipelineRequest with the working memory serialized to reduced format - - """ - return PipelineRequest( - pipe_code=pipe_code, - plx_content=plx_content, - # `ApiSerializer.serialize_working_memory_for_api` returns a dict[str, dict[str, Any]] (plain dicts), which is a valid PipelineInputs - inputs=cast("PipelineInputs", ApiSerializer.serialize_working_memory_for_api(working_memory=working_memory)), - output_name=output_name, - output_multiplicity=output_multiplicity, - dynamic_output_concept_code=dynamic_output_concept_code, - ) - - @staticmethod - def make_from_body(request_body: dict[str, Any]) -> PipelineRequest: - """Create a PipelineRequest from raw request body dictionary. - - Args: - request_body: Raw dictionary from API request body - - Returns: - PipelineRequest object with dictionary working_memory - - """ - return PipelineRequest( - pipe_code=request_body.get("pipe_code"), - plx_content=request_body.get("plx_content"), - inputs=request_body.get("inputs", {}), - output_name=request_body.get("output_name"), - output_multiplicity=request_body.get("output_multiplicity"), - dynamic_output_concept_code=request_body.get("dynamic_output_concept_code"), - ) diff --git a/pipelex/client/pipeline_response_factory.py b/pipelex/client/pipeline_response_factory.py deleted file mode 100644 index be04833d8..000000000 --- a/pipelex/client/pipeline_response_factory.py +++ /dev/null @@ -1,80 +0,0 @@ -from typing import Any - -from pipelex.client.protocol import PipelineResponse, PipelineState -from pipelex.core.memory.working_memory import MAIN_STUFF_NAME, DictWorkingMemory, WorkingMemory -from pipelex.core.pipes.pipe_output import DictPipeOutput, PipeOutput -from pipelex.core.stuffs.stuff import DictStuff - - -class PipelineResponseFactory: - """Factory class for creating PipelineResponse objects from PipeOutput.""" - - @staticmethod - def _serialize_working_memory_with_dict_stuffs(working_memory: WorkingMemory) -> DictWorkingMemory: - """Convert WorkingMemory to dict with DictStuff objects (content as dict). - - Keeps the WorkingMemory structure but converts each Stuff.content to dict. 
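The serializer below keeps the WorkingMemory structure but dumps each Stuff.content to a dict; the resulting DictWorkingMemory serializes to a shape like this sketch (stuff name, concept ref, alias key, and content values are hypothetical):

serialized = {
    "root": {
        "summary": {"concept": "docs.Summary", "content": {"text": "..."}},
    },
    "aliases": {"main_stuff": "summary"},
}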
- - Args: - working_memory: The WorkingMemory to serialize - - Returns: - Dict with root containing DictStuff objects (serialized) and aliases - """ - dict_stuffs_root: dict[str, DictStuff] = {} - - # Convert each Stuff β†’ DictStuff by dumping only the content - for stuff_name, stuff in working_memory.root.items(): - dict_stuff = DictStuff( - concept=stuff.concept.concept_ref, - content=stuff.content.model_dump(serialize_as_any=True), - ) - dict_stuffs_root[stuff_name] = dict_stuff - - return DictWorkingMemory(root=dict_stuffs_root, aliases=working_memory.aliases) - - @staticmethod - def make_from_pipe_output( - pipe_output: PipeOutput, - pipeline_run_id: str = "", - created_at: str = "", - pipeline_state: PipelineState = PipelineState.COMPLETED, - finished_at: str | None = None, - ) -> PipelineResponse: - """Create a PipelineResponse from a PipeOutput object. - - Args: - pipe_output: The PipeOutput to convert - pipeline_run_id: Unique identifier for the pipeline run - created_at: Timestamp when the pipeline was created - pipeline_state: Current state of the pipeline - finished_at: Timestamp when the pipeline finished - Returns: - PipelineResponse with the pipe output serialized to reduced format - - """ - return PipelineResponse( - pipeline_run_id=pipeline_run_id, - created_at=created_at, - pipeline_state=pipeline_state, - finished_at=finished_at, - pipe_output=DictPipeOutput( - working_memory=PipelineResponseFactory._serialize_working_memory_with_dict_stuffs(pipe_output.working_memory), - pipeline_run_id=pipe_output.pipeline_run_id, - graph_spec=pipe_output.graph_spec, - ), - main_stuff_name=pipe_output.working_memory.aliases.get(MAIN_STUFF_NAME, MAIN_STUFF_NAME), - ) - - @staticmethod - def make_from_api_response(response: dict[str, Any]) -> PipelineResponse: - """Create a PipelineResponse from an API response dictionary. 
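make_from_api_response is a thin wrapper over Pydantic validation; a sketch of the payload it accepts, using the PipelineResponse fields declared in protocol.py below (values hypothetical):

response_payload = {
    "pipeline_run_id": "run_123",
    "created_at": "2024-01-01T00:00:00Z",
    "pipeline_state": "COMPLETED",
    "finished_at": "2024-01-01T00:00:07Z",
}
# PipelineResponse.model_validate(response_payload) -> PipelineResponse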
- - Args: - response: Dictionary containing the API response data - - Returns: - PipelineResponse instance created from the response data - - """ - return PipelineResponse.model_validate(response) diff --git a/pipelex/client/protocol.py b/pipelex/client/protocol.py deleted file mode 100644 index 6eeb93f2b..000000000 --- a/pipelex/client/protocol.py +++ /dev/null @@ -1,185 +0,0 @@ -from abc import abstractmethod -from typing import Any, Protocol, Sequence - -from pydantic import BaseModel, model_validator -from pydantic.functional_validators import SkipValidation -from typing_extensions import Annotated, runtime_checkable - -from pipelex.base_exceptions import PipelexError -from pipelex.core.memory.working_memory import WorkingMemory -from pipelex.core.pipes.pipe_output import DictPipeOutput -from pipelex.core.pipes.variable_multiplicity import VariableMultiplicity -from pipelex.core.stuffs.stuff import DictStuff -from pipelex.core.stuffs.stuff_content import StuffContent -from pipelex.types import StrEnum - -# StuffContentOrData represents all possible formats for pipeline inputs input: -# Case 1: Direct content (no 'concept' key) -# - 1.1: str (simple string) -# - 1.2: Sequence[str] (list of strings) -# - 1.3: StuffContent (a StuffContent object) -# - 1.4: Sequence[StuffContent] (list of StuffContent objects, covariant) -# - 1.5: ListContent[StuffContent] (a ListContent object containing StuffContent items) -# Case 2: Dict with 'concept' AND 'content' keys -# - 2.1: {"concept": str, "content": str} -# - 2.2: {"concept": str, "content": Sequence[str]} -# - 2.3: {"concept": str, "content": StuffContent} -# - 2.4: {"concept": str, "content": Sequence[StuffContent]} -# - 2.5: {"concept": str, "content": dict[str, Any]} -# - 2.6: {"concept": str, "content": Sequence[dict[str, Any]} -# Note: Case 2 formats can be provided as plain dict or DictStuff instance -StuffContentOrData = ( - str # Case 1.1 - | Sequence[str] # Case 1.2 - | StuffContent # Case 1.3 (also covers Case 1.5 as ListContent is a StuffContent) - | Sequence[StuffContent] # Case 1.4 (covariant - accepts list[TextContent], etc.) - | dict[str, Any] # Case 2.1-2.7 - plain dicts with {"concept": str, "content": Any} structure - | DictStuff # Case 2.7 - DictStuff instances (same structure as dict but as Pydantic model) -) -PipelineInputs = dict[str, StuffContentOrData] # Can include both dict and StuffContent - - -class PipelineRequestError(PipelexError): - pass - - -class PipelineRequest(BaseModel): - """Request for executing a pipeline. - - Attributes: - pipe_code (str | None): Code of the pipe to execute - plx_content (str | None): Content of the pipeline bundle to execute - inputs (PipelineInputs | None): Inputs in PipelineInputs format - Pydantic validation is skipped - to preserve the flexible format (dicts, strings, StuffContent objects, etc.) 
- output_name (str | None): Name of the output slot to write to - output_multiplicity (PipeOutputMultiplicity | None): Output multiplicity setting - dynamic_output_concept_code (str | None): Override for the dynamic output concept code - - """ - - pipe_code: str | None = None - plx_content: str | None = None - inputs: Annotated[PipelineInputs | None, SkipValidation] = None - output_name: str | None = None - output_multiplicity: VariableMultiplicity | None = None - dynamic_output_concept_code: str | None = None - - @model_validator(mode="before") - @classmethod - def validate_request(cls, values: dict[str, Any]): - if values.get("pipe_code") is None and values.get("plx_content") is None: - msg = ( - "pipe_code and plx_content cannot be None together. Its either: Both of them, or if there is no plx_content, " - "then pipe_code must be provided and must reference a pipe already registered in the library." - "If plx_content is provided but no pipe_code, plx_content must have a main_pipe property." - ) - raise PipelineRequestError(msg) - return values - - -class PipelineState(StrEnum): - """Enum representing the possible states of a pipe execution.""" - - RUNNING = "RUNNING" - COMPLETED = "COMPLETED" - FAILED = "FAILED" - CANCELLED = "CANCELLED" - ERROR = "ERROR" - STARTED = "STARTED" - - -class PipelineResponse(BaseModel): - """Response for pipeline execution requests. - - Attributes: - pipeline_run_id (str): Unique identifier for the pipeline run - created_at (str): Timestamp when the pipeline was created - pipeline_state (PipelineState): Current state of the pipeline - finished_at (str | None): Timestamp when the pipeline finished, if completed - pipe_output (DictPipeOutput | None): Output data from the pipeline execution (working_memory dict + pipeline_run_id) - main_stuff_name (str | None): Name of the main stuff in the pipeline output - - """ - - pipeline_run_id: str - created_at: str - pipeline_state: PipelineState - finished_at: str | None = None - pipe_output: DictPipeOutput | None = None - main_stuff_name: str | None = None - - -@runtime_checkable -class PipelexProtocol(Protocol): - """Protocol defining the contract for the Pipelex API. - - This protocol specifies the interface that any Pipelex API implementation must adhere to. - All methods are asynchronous and handle pipeline execution, monitoring, and control. - - Attributes: - api_token (str): Authentication token for API access - api_base_url (str): Base URL for the API - - """ - - api_token: str - api_base_url: str - - @abstractmethod - async def execute_pipeline( - self, - pipe_code: str | None = None, - plx_content: str | None = None, - inputs: PipelineInputs | WorkingMemory | None = None, - output_name: str | None = None, - output_multiplicity: VariableMultiplicity | None = None, - dynamic_output_concept_code: str | None = None, - ) -> PipelineResponse: - """Execute a pipeline synchronously and wait for its completion. 
- - Args: - pipe_code (str): The code identifying the pipeline to execute - plx_content (str | None): Content of the pipeline bundle to execute - inputs (PipelineInputs | WorkingMemory | None): Inputs passed to the pipeline - output_name (str | None): Target output slot name - output_multiplicity (PipeOutputMultiplicity | None): Output multiplicity setting - dynamic_output_concept_code (str | None): Override for dynamic output concept - Returns: - PipelineResponse: Complete execution results including pipeline state and output - - Raises: - HTTPException: On execution failure or error - ClientAuthenticationError: If API token is missing for API execution - - """ - ... - - @abstractmethod - async def start_pipeline( - self, - pipe_code: str | None = None, - plx_content: str | None = None, - inputs: PipelineInputs | WorkingMemory | None = None, - output_name: str | None = None, - output_multiplicity: VariableMultiplicity | None = None, - dynamic_output_concept_code: str | None = None, - ) -> PipelineResponse: - """Start a pipeline execution asynchronously without waiting for completion. - - Args: - pipe_code (str): The code identifying the pipeline to execute - plx_content (str | None): Content of the pipeline bundle to execute - inputs (PipelineInputs | WorkingMemory | None): Inputs passed to the pipeline - output_name (str | None): Target output slot name - output_multiplicity (PipeOutputMultiplicity | None): Output multiplicity setting - dynamic_output_concept_code (str | None): Override for dynamic output concept - - Returns: - PipelineResponse: Initial response with pipeline_run_id and created_at timestamp - - Raises: - HTTPException: On pipeline start failure - ClientAuthenticationError: If API token is missing for API execution - - """ - ... diff --git a/pipelex/cogt/models/model_deck.py b/pipelex/cogt/models/model_deck.py index 6823efe75..dcc70550f 100644 --- a/pipelex/cogt/models/model_deck.py +++ b/pipelex/cogt/models/model_deck.py @@ -629,8 +629,8 @@ def _resolve_waterfall( msg = ( f"Inference model fallback: '{ideal_model_handle}' was not found in the model deck, " f"so it was replaced by '{fallback}'. " - f"As a consequence, the results of the workflow may not have the expected quality, " - f"and the workflow might fail due to feature limitations such as context window size, etc. " + f"As a consequence, the results of the method may not have the expected quality, " + f"and the method might fail due to feature limitations such as context window size, etc. " f"Consider getting access to '{ideal_model_handle}'." 
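A standalone sketch of the waterfall behavior this warning describes: when the ideal handle is absent from the deck, the first available fallback is used instead (helper name and data are hypothetical, not the deck's actual API):

def resolve_model_handle(available: set[str], ideal: str, fallbacks: list[str]) -> str:
    if ideal in available:
        return ideal
    for fallback in fallbacks:
        if fallback in available:
            return fallback
    raise ValueError(f"No model available for '{ideal}'")

# resolve_model_handle({"gpt-4o-mini"}, "gpt-5", ["gpt-4o", "gpt-4o-mini"]) -> "gpt-4o-mini"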
) enabled_backends = self._get_enabled_backends() diff --git a/pipelex/cogt/models/model_manager.py b/pipelex/cogt/models/model_manager.py index 91e6f423f..a13c72679 100644 --- a/pipelex/cogt/models/model_manager.py +++ b/pipelex/cogt/models/model_manager.py @@ -13,7 +13,7 @@ from pipelex.cogt.models.model_deck_loader import load_model_deck_blueprint from pipelex.cogt.models.model_manager_abstract import ModelManagerAbstract from pipelex.config import get_config -from pipelex.system.configuration.configs import ConfigPaths +from pipelex.system.configuration.config_loader import config_manager from pipelex.tools.misc.file_utils import find_files_in_dir from pipelex.tools.secrets.secrets_provider_abstract import SecretsProviderAbstract @@ -59,16 +59,16 @@ def setup( ) -> None: self.inference_backend_library.load( secrets_provider=secrets_provider, - backends_library_path=ConfigPaths.BACKENDS_FILE_PATH, - backends_dir_path=ConfigPaths.BACKENDS_DIR_PATH, + backends_library_path=config_manager.backends_file_path, + backends_dir_path=config_manager.backends_dir_path, gateway_model_specs=gateway_model_specs, ) enabled_backends = self.inference_backend_library.all_enabled_backends() self._routing_profile = load_active_routing_profile( - routing_profile_library_path=ConfigPaths.ROUTING_PROFILES_FILE_PATH, + routing_profile_library_path=config_manager.routing_profiles_file_path, enabled_backends=enabled_backends, ) - model_deck_paths = ModelManager.get_model_deck_paths(deck_dir_path=ConfigPaths.MODEL_DECKS_DIR_PATH) + model_deck_paths = ModelManager.get_model_deck_paths(deck_dir_path=config_manager.model_decks_dir_path) deck_blueprint = load_model_deck_blueprint(model_deck_paths=model_deck_paths) self.model_deck = self.build_deck(enabled_backends=enabled_backends, model_deck_blueprint=deck_blueprint) diff --git a/pipelex/core/bundles/pipelex_bundle_blueprint.py b/pipelex/core/bundles/pipelex_bundle_blueprint.py index cbf104be7..a40175742 100644 --- a/pipelex/core/bundles/pipelex_bundle_blueprint.py +++ b/pipelex/core/bundles/pipelex_bundle_blueprint.py @@ -10,8 +10,10 @@ from pipelex.core.domains.validation import validate_domain_code from pipelex.core.pipes.validation import is_pipe_code_valid from pipelex.core.pipes.variable_multiplicity import parse_concept_with_multiplicity +from pipelex.core.qualified_ref import QualifiedRef, QualifiedRefError from pipelex.pipe_controllers.batch.pipe_batch_blueprint import PipeBatchBlueprint from pipelex.pipe_controllers.condition.pipe_condition_blueprint import PipeConditionBlueprint +from pipelex.pipe_controllers.condition.special_outcome import SpecialOutcome from pipelex.pipe_controllers.parallel.pipe_parallel_blueprint import PipeParallelBlueprint from pipelex.pipe_controllers.sequence.pipe_sequence_blueprint import PipeSequenceBlueprint from pipelex.pipe_operators.compose.pipe_compose_blueprint import PipeComposeBlueprint @@ -123,18 +125,19 @@ def validate_local_concept_references(self) -> Self: undeclared_refs: list[str] = [] for concept_ref_or_code, context in all_refs: - # Determine if this is a local reference or an external one - if "." 
in concept_ref_or_code: - # It's a concept ref (domain.ConceptCode) - domain, concept_code = concept_ref_or_code.split(".", 1) - if domain != self.domain: - # External reference - skip validation (will be validated when loading dependencies) - continue - else: - # It's a bare concept code - always local - concept_code = concept_ref_or_code - - # Validate local reference + # Cross-package references are validated at package level, not bundle level + if QualifiedRef.has_cross_package_prefix(concept_ref_or_code): + continue + + # Parse the reference using QualifiedRef + ref = QualifiedRef.parse(concept_ref_or_code) + + if ref.is_external_to(self.domain): + # External reference - skip validation (will be validated when loading dependencies) + continue + + # Local reference (bare code or same domain) - validate + concept_code = ref.local_code if concept_code not in declared_concepts and concept_code not in native_codes: undeclared_refs.append(f"'{concept_ref_or_code}' in {context}") @@ -148,6 +151,85 @@ def validate_local_concept_references(self) -> Self: raise ValueError(msg) return self + @model_validator(mode="after") + def validate_local_pipe_references(self) -> Self: + """Validate that domain-qualified pipe references pointing to this bundle's domain exist locally. + + Three categories: + - Bare refs (no dot): no validation here (deferred to package-level resolution) + - Domain-qualified, same domain: must exist in self.pipe + - Domain-qualified, different domain: skip (external, validated at load time) + + Special outcomes ("fail", "continue") are excluded from validation. + """ + declared_pipes: set[str] = set(self.pipe.keys()) if self.pipe else set() + special_outcomes = SpecialOutcome.value_list() + all_pipe_refs = self.collect_pipe_references() + + invalid_refs: list[str] = [] + for pipe_ref_str, context in all_pipe_refs: + # Skip special outcomes + if pipe_ref_str in special_outcomes: + continue + + # Cross-package references are validated at package level, not bundle level + if QualifiedRef.has_cross_package_prefix(pipe_ref_str): + continue + + # Try to parse as a pipe ref + try: + ref = QualifiedRef.parse_pipe_ref(pipe_ref_str) + except QualifiedRefError: + # If it doesn't parse as a valid pipe ref, skip (will be caught elsewhere) + continue + + if not ref.is_qualified: + # Bare ref - no validation at bundle level + continue + + if ref.is_external_to(self.domain): + # External domain - skip + continue + + # Same domain, qualified ref - must exist locally + if ref.local_code not in declared_pipes: + invalid_refs.append(f"'{pipe_ref_str}' in {context}") + + if invalid_refs: + msg = ( + f"The following same-domain pipe references are not declared in domain '{self.domain}' " + f"at '{self.source}': {', '.join(invalid_refs)}. " + f"Declared pipes: {sorted(declared_pipes) if declared_pipes else '(none)'}" + ) + raise ValueError(msg) + return self + + def collect_pipe_references(self) -> list[tuple[str, str]]: + """Collect all pipe references from controller blueprints. 
+ + Returns: + List of (pipe_ref_string, context_description) tuples + """ + pipe_refs: list[tuple[str, str]] = [] + if not self.pipe: + return pipe_refs + + for pipe_code, pipe_blueprint in self.pipe.items(): + if isinstance(pipe_blueprint, PipeSequenceBlueprint): + for step_index, step in enumerate(pipe_blueprint.steps): + pipe_refs.append((step.pipe, f"pipe.{pipe_code}.steps[{step_index}].pipe")) + elif isinstance(pipe_blueprint, PipeBatchBlueprint): + pipe_refs.append((pipe_blueprint.branch_pipe_code, f"pipe.{pipe_code}.branch_pipe_code")) + elif isinstance(pipe_blueprint, PipeConditionBlueprint): + for outcome_key, outcome_pipe in pipe_blueprint.outcomes.items(): + pipe_refs.append((outcome_pipe, f"pipe.{pipe_code}.outcomes[{outcome_key}]")) + pipe_refs.append((pipe_blueprint.default_outcome, f"pipe.{pipe_code}.default_outcome")) + elif isinstance(pipe_blueprint, PipeParallelBlueprint): + for branch_index, branch in enumerate(pipe_blueprint.branches): + pipe_refs.append((branch.pipe, f"pipe.{pipe_code}.branches[{branch_index}].pipe")) + + return pipe_refs + def _collect_local_concept_references(self) -> list[tuple[str, str]]: local_refs: list[tuple[str, str]] = [] diff --git a/pipelex/core/concepts/concept.py b/pipelex/core/concepts/concept.py index 2b3bd8740..478d8ce89 100644 --- a/pipelex/core/concepts/concept.py +++ b/pipelex/core/concepts/concept.py @@ -1,7 +1,8 @@ -from typing import Any +from typing import Any, Callable from kajson.kajson_manager import KajsonManager -from pydantic import BaseModel, ConfigDict, field_validator +from mthds.models.concept import ConceptAbstract +from pydantic import field_validator from pipelex import log from pipelex.base_exceptions import PipelexUnexpectedError @@ -15,22 +16,14 @@ from pipelex.core.domains.domain import SpecialDomain from pipelex.core.domains.exceptions import DomainCodeError from pipelex.core.domains.validation import validate_domain_code +from pipelex.core.qualified_ref import QualifiedRef from pipelex.core.stuffs.image_field_search import search_for_nested_image_fields from pipelex.core.stuffs.stuff_content import StuffContent from pipelex.tools.misc.string_utils import pascal_case_to_sentence from pipelex.tools.typing.class_utils import are_classes_equivalent, has_compatible_field -class Concept(BaseModel): - model_config = ConfigDict(extra="forbid", strict=True) - - code: str - domain_code: str - description: str - structure_class_name: str - # TODO: rethink this refines field here. 
- refines: str | None = None - +class Concept(ConceptAbstract): @field_validator("code") @classmethod def validate_code(cls, code: str) -> str: @@ -74,10 +67,6 @@ def validate_refines(cls, refines: str | None) -> str | None: msg = f"Refines '{refines}' must be a valid concept ref (domain.ConceptCode) or concept code (PascalCase)" raise ConceptValueError(msg) - @property - def concept_ref(self) -> str: - return f"{self.domain_code}.{self.code}" - @property def simple_concept_ref(self) -> str: if SpecialDomain.is_native(domain_code=self.domain_code): @@ -94,7 +83,13 @@ def is_native_concept(cls, concept: "Concept") -> bool: return NativeConceptCode.is_native_concept_ref_or_code(concept_ref_or_code=concept.concept_ref) @classmethod - def are_concept_compatible(cls, concept_1: "Concept", concept_2: "Concept", strict: bool = False) -> bool: + def are_concept_compatible( + cls, + concept_1: "Concept", + concept_2: "Concept", + strict: bool = False, + concept_resolver: Callable[[str], "Concept | None"] | None = None, + ) -> bool: if NativeConceptCode.is_dynamic_concept(concept_code=concept_1.code): return True if NativeConceptCode.is_dynamic_concept(concept_code=concept_2.code): @@ -105,12 +100,31 @@ def are_concept_compatible(cls, concept_1: "Concept", concept_2: "Concept", stri return True # If concept_1 refines concept_2 by string, they are strictly compatible - if concept_1.refines is not None and concept_1.refines == concept_2.concept_ref: - return True + if concept_1.refines is not None: + if concept_1.refines == concept_2.concept_ref: + return True + # Cross-package refines: resolve the aliased ref and compare concept_refs + if QualifiedRef.has_cross_package_prefix(concept_1.refines) and concept_resolver is not None: + resolved = concept_resolver(concept_1.refines) + if resolved is not None and resolved.concept_ref == concept_2.concept_ref: + return True # If both concepts refine the same concept, they are compatible - if concept_1.refines is not None and concept_2.refines is not None and concept_1.refines == concept_2.refines: - return True + if concept_1.refines is not None and concept_2.refines is not None: + refines_1 = concept_1.refines + refines_2 = concept_2.refines + # Resolve cross-package refines through the resolver + if concept_resolver is not None: + if QualifiedRef.has_cross_package_prefix(refines_1): + resolved_1 = concept_resolver(refines_1) + if resolved_1 is not None: + refines_1 = resolved_1.concept_ref + if QualifiedRef.has_cross_package_prefix(refines_2): + resolved_2 = concept_resolver(refines_2) + if resolved_2 is not None: + refines_2 = resolved_2.concept_ref + if refines_1 == refines_2: + return True # Check class-based compatibility # This now works even when one or both concepts have refines, since we generate diff --git a/pipelex/core/concepts/concept_factory.py b/pipelex/core/concepts/concept_factory.py index 7e2ef1725..9f9c2732a 100644 --- a/pipelex/core/concepts/concept_factory.py +++ b/pipelex/core/concepts/concept_factory.py @@ -16,12 +16,13 @@ from pipelex.core.concepts.structure_generation.generator import StructureGenerator from pipelex.core.concepts.validation import validate_concept_ref_or_code from pipelex.core.domains.domain import SpecialDomain +from pipelex.core.qualified_ref import QualifiedRef from pipelex.core.stuffs.text_content import TextContent from pipelex.types import StrEnum class ConceptDeclarationType(StrEnum): - """Enum representing the 5 ways a concept can be declared in PLX files. 
+ """Enum representing the 5 ways a concept can be declared in MTHDS files. Option 1: STRING - Concept is defined as a string Example: @@ -168,6 +169,15 @@ def make_domain_and_concept_code_from_concept_ref_or_code( concept_ref_or_code: str, domain_code: str | None = None, ) -> DomainAndConceptCode: + # Handle cross-package references (alias->domain.ConceptCode) + if QualifiedRef.has_cross_package_prefix(concept_ref_or_code): + alias, remainder = QualifiedRef.split_cross_package_ref(concept_ref_or_code) + ref = QualifiedRef.parse_concept_ref(remainder) + if ref.domain_path is None: + msg = f"Cross-package concept ref '{concept_ref_or_code}' must include a domain" + raise ConceptFactoryError(msg) + return DomainAndConceptCode(domain_code=f"{alias}->{ref.domain_path}", concept_code=ref.local_code) + if "." not in concept_ref_or_code and not domain_code: msg = f"Not enough information to make a domain and concept code from '{concept_ref_or_code}'" raise ConceptFactoryError(msg) @@ -178,12 +188,14 @@ def make_domain_and_concept_code_from_concept_ref_or_code( raise ConceptFactoryError(msg) from exc if NativeConceptCode.is_native_concept_ref_or_code(concept_ref_or_code=concept_ref_or_code): - natice_concept_ref = NativeConceptCode.get_validated_native_concept_ref(concept_ref_or_code=concept_ref_or_code) - return DomainAndConceptCode(domain_code=SpecialDomain.NATIVE, concept_code=natice_concept_ref.split(".")[1]) + native_concept_ref = NativeConceptCode.get_validated_native_concept_ref(concept_ref_or_code=concept_ref_or_code) + ref = QualifiedRef.parse(native_concept_ref) + return DomainAndConceptCode(domain_code=SpecialDomain.NATIVE, concept_code=ref.local_code) if "." in concept_ref_or_code: - domain_code, concept_code = concept_ref_or_code.rsplit(".") - return DomainAndConceptCode(domain_code=domain_code, concept_code=concept_code) + ref = QualifiedRef.parse(concept_ref_or_code) + assert ref.domain_path is not None + return DomainAndConceptCode(domain_code=ref.domain_path, concept_code=ref.local_code) elif domain_code: return DomainAndConceptCode(domain_code=domain_code, concept_code=concept_ref_or_code) else: @@ -214,6 +226,7 @@ def make_refine(cls, refine: str, domain_code: str) -> str: it will be normalized to include the native domain prefix (e.g., 'native.Text'). If the refine is a local concept code without domain (e.g., 'MyCustomConcept'), it will be prefixed with the given domain_code. + Cross-package refs (e.g., 'alias->domain.Concept') are passed through as-is. Args: refine: The refine string to validate and normalize @@ -226,6 +239,9 @@ def make_refine(cls, refine: str, domain_code: str) -> str: ConceptFactoryError: If the refine is invalid """ + # Cross-package refs pass through unchanged + if QualifiedRef.has_cross_package_prefix(refine): + return refine if NativeConceptCode.is_native_concept_ref_or_code(concept_ref_or_code=refine): return NativeConceptCode.get_validated_native_concept_ref(concept_ref_or_code=refine) elif "." in refine: @@ -362,10 +378,31 @@ def _handle_refines( msg = f"Could not validate refine '{blueprint.refines}' for concept '{concept_code}' in domain '{domain_code}': {exc}" raise ConceptFactoryError(msg) from exc + # Cross-package refines: base class isn't available locally, so generate + # a standalone TextContent subclass. The refinement relationship is tracked + # in the concept model's refines field for runtime compatibility checks. 
+ if QualifiedRef.has_cross_package_prefix(current_refine): + try: + _, the_generated_class = StructureGenerator().generate_from_structure_blueprint( + class_name=concept_code, + structure_blueprint={}, + description=blueprint.description, + ) + except ConceptStructureGeneratorError as exc: + msg = ( + f"Error generating structure class for concept '{concept_code}' " + f"with cross-package refines '{current_refine}' in domain '{domain_code}': {exc}" + ) + raise ConceptFactoryError(msg) from exc + + KajsonManager.get_class_registry().register_class(the_generated_class) + return concept_code, current_refine + # Get the refined concept's structure class name # For native concepts, the structure class name is "ConceptCode" + "Content" (e.g., TextContent) # For custom concepts, the structure class name is just the concept code (e.g., Customer) - refined_concept_code = current_refine.split(".")[1] + refined_ref = QualifiedRef.parse(current_refine) + refined_concept_code = refined_ref.local_code if NativeConceptCode.is_native_concept_ref_or_code(concept_ref_or_code=current_refine): refined_structure_class_name = refined_concept_code + "Content" else: diff --git a/pipelex/core/concepts/helpers.py b/pipelex/core/concepts/helpers.py index ce7040873..bf17699a2 100644 --- a/pipelex/core/concepts/helpers.py +++ b/pipelex/core/concepts/helpers.py @@ -4,6 +4,7 @@ from pipelex.core.concepts.concept_structure_blueprint import ConceptStructureBlueprint, ConceptStructureBlueprintFieldType from pipelex.core.concepts.validation import is_concept_ref_or_code_valid +from pipelex.core.qualified_ref import QualifiedRef if TYPE_CHECKING: from pipelex.core.concepts.concept_blueprint import ConceptBlueprint @@ -35,10 +36,8 @@ def get_structure_class_name_from_blueprint( raise ValueError(msg) # Extract concept_code from concept_ref_or_code - if "." in concept_ref_or_code: - concept_code = concept_ref_or_code.rsplit(".", maxsplit=1)[-1] - else: - concept_code = concept_ref_or_code + ref = QualifiedRef.parse(concept_ref_or_code) + concept_code = ref.local_code if isinstance(blueprint_or_string_description, str): return concept_code @@ -101,6 +100,5 @@ def extract_concept_code_from_concept_ref_or_code(concept_ref_or_code: str) -> s msg = f"Invalid concept_ref_or_code: '{concept_ref_or_code}' for extracting concept code" raise ValueError(msg) - if "." in concept_ref_or_code: - return concept_ref_or_code.rsplit(".", maxsplit=1)[-1] - return concept_ref_or_code + ref = QualifiedRef.parse(concept_ref_or_code) + return ref.local_code diff --git a/pipelex/core/concepts/native/concept_native.py b/pipelex/core/concepts/native/concept_native.py index f6cbcee27..bba314e77 100644 --- a/pipelex/core/concepts/native/concept_native.py +++ b/pipelex/core/concepts/native/concept_native.py @@ -1,6 +1,7 @@ from pipelex.core.concepts.native.exceptions import NativeConceptDefinitionError from pipelex.core.concepts.validation import is_concept_ref_or_code_valid from pipelex.core.domains.domain import SpecialDomain +from pipelex.core.qualified_ref import QualifiedRef from pipelex.core.stuffs.document_content import DocumentContent from pipelex.core.stuffs.dynamic_content import DynamicContent from pipelex.core.stuffs.html_content import HtmlContent @@ -160,8 +161,9 @@ def is_native_concept_ref_or_code(cls, concept_ref_or_code: str) -> bool: return False if "." 
in concept_ref_or_code: - domain_code, concept_code = concept_ref_or_code.split(".", 1) - return SpecialDomain.is_native(domain_code=domain_code) and concept_code in cls.values_list() + ref = QualifiedRef.parse(concept_ref_or_code) + assert ref.domain_path is not None + return SpecialDomain.is_native(domain_code=ref.domain_path) and ref.local_code in cls.values_list() return concept_ref_or_code in cls.values_list() @classmethod @@ -179,8 +181,9 @@ def is_valid_native_concept_ref(cls, concept_ref: str) -> bool: """ if "." not in concept_ref: return False - domain_code, concept_code = concept_ref.split(".", 1) - return SpecialDomain.is_native(domain_code=domain_code) and concept_code in cls.values_list() + ref = QualifiedRef.parse(concept_ref) + assert ref.domain_path is not None + return SpecialDomain.is_native(domain_code=ref.domain_path) and ref.local_code in cls.values_list() @classmethod def validate_native_concept_ref_or_code(cls, concept_ref_or_code: str) -> None: diff --git a/pipelex/core/concepts/structure_generation/generator.py b/pipelex/core/concepts/structure_generation/generator.py index 0a57301c3..061435ec4 100644 --- a/pipelex/core/concepts/structure_generation/generator.py +++ b/pipelex/core/concepts/structure_generation/generator.py @@ -89,7 +89,7 @@ def generate_from_structure_blueprint( "\n" "If you want to customize this structure:\n" " 1. Copy this file to your own module\n" - " 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file\n" + " 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file\n" " and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)\n" " 3. Make sure your custom class is importable and registered\n" "\n" diff --git a/pipelex/core/concepts/validation.py b/pipelex/core/concepts/validation.py index 67448ee13..7301eca71 100644 --- a/pipelex/core/concepts/validation.py +++ b/pipelex/core/concepts/validation.py @@ -1,5 +1,5 @@ from pipelex.core.concepts.exceptions import ConceptCodeError, ConceptStringError -from pipelex.core.domains.validation import is_domain_code_valid +from pipelex.core.qualified_ref import QualifiedRef, QualifiedRefError from pipelex.tools.misc.string_utils import is_pascal_case @@ -14,40 +14,46 @@ def validate_concept_code(concept_code: str) -> None: def is_concept_ref_valid(concept_ref: str) -> bool: - if "." not in concept_ref: + """Check if a concept reference (domain.ConceptCode) is valid. + + Supports hierarchical domains: "legal.contracts.NonCompeteClause" is valid. + Supports cross-package refs: "alias->domain.ConceptCode" is valid. + """ + if QualifiedRef.has_cross_package_prefix(concept_ref): + _, remainder = QualifiedRef.split_cross_package_ref(concept_ref) + return is_concept_ref_valid(concept_ref=remainder) + try: + ref = QualifiedRef.parse_concept_ref(concept_ref) + except QualifiedRefError: return False - - if concept_ref.count(".") > 1: - return False - - domain, concept_code = concept_ref.split(".", 1) - - # Validate domain - if not is_domain_code_valid(code=domain): - return False - - # Validate concept code - return is_concept_code_valid(concept_code=concept_code) + return ref.is_qualified def validate_concept_ref(concept_ref: str) -> None: if not is_concept_ref_valid(concept_ref=concept_ref): msg = ( f"Concept string '{concept_ref}' is not a valid concept string. 
It must be in the format 'domain.ConceptCode': " - " - domain: a valid domain code (snake_case), " + " - domain: a valid domain code (snake_case, possibly hierarchical like legal.contracts), " " - ConceptCode: a valid concept code (PascalCase)" ) raise ConceptStringError(msg) def is_concept_ref_or_code_valid(concept_ref_or_code: str) -> bool: - if concept_ref_or_code.count(".") > 1: - return False + """Check if a concept reference or bare code is valid. - if concept_ref_or_code.count(".") == 1: + Supports hierarchical domains: "legal.contracts.NonCompeteClause" is valid. + Bare codes must be PascalCase: "NonCompeteClause" is valid. + Supports cross-package refs: "alias->domain.ConceptCode" is valid. + """ + if not concept_ref_or_code: + return False + if QualifiedRef.has_cross_package_prefix(concept_ref_or_code): + _, remainder = QualifiedRef.split_cross_package_ref(concept_ref_or_code) + return is_concept_ref_or_code_valid(concept_ref_or_code=remainder) + if "." in concept_ref_or_code: return is_concept_ref_valid(concept_ref=concept_ref_or_code) - else: - return is_concept_code_valid(concept_code=concept_ref_or_code) + return is_concept_code_valid(concept_code=concept_ref_or_code) def validate_concept_ref_or_code(concept_ref_or_code: str) -> None: diff --git a/pipelex/core/domains/validation.py b/pipelex/core/domains/validation.py index 9d3c1f00b..8d6543b14 100644 --- a/pipelex/core/domains/validation.py +++ b/pipelex/core/domains/validation.py @@ -1,12 +1,29 @@ +from typing import Any + from pipelex.core.domains.exceptions import DomainCodeError +from pipelex.core.qualified_ref import QualifiedRef from pipelex.tools.misc.string_utils import is_snake_case -def is_domain_code_valid(code: str) -> bool: - return is_snake_case(code) +def is_domain_code_valid(code: Any) -> bool: + """Check if a domain code is valid. + + Accepts single-segment (e.g. "legal") and hierarchical dotted paths + (e.g. "legal.contracts", "legal.contracts.shareholder"). + Each segment must be snake_case. + Supports cross-package domain codes (e.g. "alias->scoring"). + """ + if not isinstance(code, str): + return False + if QualifiedRef.has_cross_package_prefix(code): + _, remainder = QualifiedRef.split_cross_package_ref(code) + return is_domain_code_valid(code=remainder) + if not code or code.startswith(".") or code.endswith(".") or ".." in code: + return False + return all(is_snake_case(segment) for segment in code.split(".")) def validate_domain_code(code: str) -> None: if not is_domain_code_valid(code=code): - msg = f"Domain code '{code}' is not a valid domain code. It should be in snake_case." + msg = f"Domain code '{code}' is not a valid domain code. It should be in snake_case (segments separated by dots for hierarchical domains)." 
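# A sketch of the reference forms the validators above now accept (examples only):
#   "NonCompeteClause"                      bare concept code (PascalCase)
#   "legal.contracts.NonCompeteClause"      hierarchical domain + concept code
#   "scoring->legal.NonCompeteClause"       cross-package ref via alias prefix
#   "legal.contracts.shareholder"           hierarchical domain code (snake_case segments)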
raise DomainCodeError(msg) diff --git a/pipelex/core/interpreter/exceptions.py b/pipelex/core/interpreter/exceptions.py index 6b5c4125b..70e9fce4c 100644 --- a/pipelex/core/interpreter/exceptions.py +++ b/pipelex/core/interpreter/exceptions.py @@ -15,5 +15,5 @@ def __init__( super().__init__(message) -class PLXDecodeError(TomlError): - """Raised when PLX decoding fails.""" +class MthdsDecodeError(TomlError): + """Raised when MTHDS decoding fails.""" diff --git a/pipelex/core/interpreter/helpers.py b/pipelex/core/interpreter/helpers.py index 517994258..3c50de101 100644 --- a/pipelex/core/interpreter/helpers.py +++ b/pipelex/core/interpreter/helpers.py @@ -2,17 +2,19 @@ from pipelex.types import StrEnum +MTHDS_EXTENSION = ".mthds" + def is_pipelex_file(file_path: Path) -> bool: - """Check if a file is a Pipelex PLX file based on its extension. + """Check if a file is a Pipelex MTHDS file based on its extension. Args: file_path: Path to the file to check Returns: - True if the file has .plx extension, False otherwise + True if the file has .mthds extension, False otherwise """ - return file_path.suffix == ".plx" + return file_path.suffix == MTHDS_EXTENSION class ValidationErrorScope(StrEnum): diff --git a/pipelex/core/interpreter/interpreter.py b/pipelex/core/interpreter/interpreter.py index 6ae158f23..d6ece605d 100644 --- a/pipelex/core/interpreter/interpreter.py +++ b/pipelex/core/interpreter/interpreter.py @@ -4,7 +4,7 @@ from pydantic import BaseModel, ValidationError from pipelex.core.bundles.pipelex_bundle_blueprint import PipelexBundleBlueprint -from pipelex.core.interpreter.exceptions import PipelexInterpreterError, PLXDecodeError +from pipelex.core.interpreter.exceptions import MthdsDecodeError, PipelexInterpreterError from pipelex.core.interpreter.validation_error_categorizer import PIPELEX_BUNDLE_BLUEPRINT_SOURCE_FIELD, categorize_blueprint_validation_error from pipelex.tools.misc.toml_utils import TomlError, load_toml_from_content, load_toml_from_path from pipelex.tools.typing.pydantic_utils import format_pydantic_validation_error @@ -14,25 +14,25 @@ class PipelexInterpreter(BaseModel): - """plx -> PipelexBundleBlueprint""" + """MTHDS -> PipelexBundleBlueprint""" @classmethod - def make_pipelex_bundle_blueprint(cls, bundle_path: Path | None = None, plx_content: str | None = None) -> PipelexBundleBlueprint: + def make_pipelex_bundle_blueprint(cls, bundle_path: Path | None = None, mthds_content: str | None = None) -> PipelexBundleBlueprint: blueprint_dict: dict[str, Any] try: if bundle_path is not None: blueprint_dict = load_toml_from_path(path=str(bundle_path)) blueprint_dict[PIPELEX_BUNDLE_BLUEPRINT_SOURCE_FIELD] = str(bundle_path) - elif plx_content is not None: - blueprint_dict = load_toml_from_content(content=plx_content) + elif mthds_content is not None: + blueprint_dict = load_toml_from_content(content=mthds_content) else: - msg = "Either 'bundle_path' or 'plx_content' must be provided for the PipelexInterpreter to make a PipelexBundleBlueprint" + msg = "Either 'bundle_path' or 'mthds_content' must be provided for the PipelexInterpreter to make a PipelexBundleBlueprint" raise PipelexInterpreterError(msg) except TomlError as exc: - raise PLXDecodeError(message=exc.message, doc=exc.doc, pos=exc.pos, lineno=exc.lineno, colno=exc.colno) from exc + raise MthdsDecodeError(message=exc.message, doc=exc.doc, pos=exc.pos, lineno=exc.lineno, colno=exc.colno) from exc if not blueprint_dict: - msg = "Could not make 'PipelexBundleBlueprint': no blueprint found in the PLX file" + msg = 
"Could not make 'PipelexBundleBlueprint': no blueprint found in the MTHDS file" raise PipelexInterpreterError(msg) try: diff --git a/pipelex/core/memory/working_memory.py b/pipelex/core/memory/working_memory.py index 76b698752..bdb9454ee 100644 --- a/pipelex/core/memory/working_memory.py +++ b/pipelex/core/memory/working_memory.py @@ -1,7 +1,8 @@ from operator import attrgetter from typing import Any, cast -from pydantic import BaseModel, Field, model_validator +from mthds.models.working_memory import WorkingMemoryAbstract +from pydantic import Field, model_validator from typing_extensions import override from pipelex import log, pretty_print @@ -17,7 +18,7 @@ from pipelex.core.stuffs.list_content import ListContent from pipelex.core.stuffs.mermaid_content import MermaidContent from pipelex.core.stuffs.number_content import NumberContent -from pipelex.core.stuffs.stuff import DictStuff, Stuff +from pipelex.core.stuffs.stuff import Stuff from pipelex.core.stuffs.stuff_artefact import StuffArtefact from pipelex.core.stuffs.stuff_content import StuffContentType from pipelex.core.stuffs.text_and_images_content import TextAndImagesContent @@ -34,12 +35,7 @@ StuffArtefactDict = dict[str, StuffArtefact] -class DictWorkingMemory(BaseModel): - root: dict[str, DictStuff] - aliases: dict[str, str] - - -class WorkingMemory(BaseModel, ContextProviderAbstract): +class WorkingMemory(WorkingMemoryAbstract[Stuff], ContextProviderAbstract): root: StuffDict = Field(default_factory=dict) aliases: dict[str, str] = Field(default_factory=dict) diff --git a/pipelex/core/memory/working_memory_factory.py b/pipelex/core/memory/working_memory_factory.py index 5b841e36f..89859f590 100644 --- a/pipelex/core/memory/working_memory_factory.py +++ b/pipelex/core/memory/working_memory_factory.py @@ -1,8 +1,8 @@ import shortuuid +from mthds.models.pipeline_inputs import PipelineInputs from pydantic import BaseModel from pipelex import log -from pipelex.client.protocol import PipelineInputs from pipelex.cogt.content_generation.dry_run_factory import DryRunFactory from pipelex.core.memory.exceptions import WorkingMemoryFactoryError from pipelex.core.memory.working_memory import MAIN_STUFF_NAME, StuffDict, WorkingMemory diff --git a/pipelex/core/packages/__init__.py b/pipelex/core/packages/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/pipelex/core/packages/bundle_scanner.py b/pipelex/core/packages/bundle_scanner.py new file mode 100644 index 000000000..8411a9959 --- /dev/null +++ b/pipelex/core/packages/bundle_scanner.py @@ -0,0 +1,87 @@ +from collections.abc import Iterable +from pathlib import Path + +from pipelex.core.bundles.pipelex_bundle_blueprint import PipelexBundleBlueprint +from pipelex.core.interpreter.interpreter import PipelexInterpreter +from pipelex.core.packages.manifest import DomainExports + + +def scan_bundles_for_domain_info( + mthds_files: Iterable[Path], +) -> tuple[dict[str, list[str]], dict[str, str], list[PipelexBundleBlueprint], list[str]]: + """Scan .mthds files and extract domain/pipe information from their headers. + + Iterates over the given bundle files, parses each blueprint to collect + which pipes belong to which domains, and which domain has a main_pipe. 
+ + Args: + mthds_files: Paths to .mthds files to scan + + Returns: + A tuple of (domain_pipes, domain_main_pipes, blueprints, errors) where: + - domain_pipes maps domain codes to their list of pipe codes + - domain_main_pipes maps domain codes to their main_pipe code + - blueprints is a list of successfully parsed PipelexBundleBlueprint objects + - errors is a list of "{path}: {exc}" strings for files that failed parsing + """ + domain_pipes: dict[str, list[str]] = {} + domain_main_pipes: dict[str, str] = {} + blueprints: list[PipelexBundleBlueprint] = [] + errors: list[str] = [] + + for mthds_file in mthds_files: + try: + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=mthds_file) + except Exception as exc: + errors.append(f"{mthds_file}: {exc}") + continue + + blueprints.append(blueprint) + + domain = blueprint.domain + if domain not in domain_pipes: + domain_pipes[domain] = [] + + if blueprint.pipe: + for pipe_code in blueprint.pipe: + domain_pipes[domain].append(pipe_code) + + if blueprint.main_pipe: + existing = domain_main_pipes.get(domain) + if existing and existing != blueprint.main_pipe: + errors.append(f"Conflicting main_pipe for domain '{domain}': '{existing}' vs '{blueprint.main_pipe}' (from {mthds_file})") + else: + domain_main_pipes[domain] = blueprint.main_pipe + + return domain_pipes, domain_main_pipes, blueprints, errors + + +def build_domain_exports_from_scan( + domain_pipes: dict[str, list[str]], + domain_main_pipes: dict[str, str], +) -> list[DomainExports]: + """Build a list of DomainExports from scan results, placing main_pipe first. + + For each domain (sorted alphabetically), creates a DomainExports entry with + the main_pipe listed first (if present), followed by remaining pipes sorted + alphabetically. Domains with zero exportable pipes are skipped. 
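A worked example of the ordering rule (domain and pipe names hypothetical): the main pipe comes first, the remaining pipes follow alphabetically, without duplicates.

exports = build_domain_exports_from_scan(
    domain_pipes={"scoring": ["analyze", "report", "score"]},
    domain_main_pipes={"scoring": "score"},
)
# -> [DomainExports(domain_path="scoring", pipes=["score", "analyze", "report"])]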
+ + Args: + domain_pipes: Mapping of domain codes to their pipe codes + domain_main_pipes: Mapping of domain codes to their main_pipe code + + Returns: + List of DomainExports with deterministic ordering + """ + exports: list[DomainExports] = [] + for domain, pipe_codes in sorted(domain_pipes.items()): + exported: list[str] = [] + main_pipe = domain_main_pipes.get(domain) + if main_pipe and main_pipe not in exported: + exported.append(main_pipe) + for pipe_code in sorted(pipe_codes): + if pipe_code not in exported: + exported.append(pipe_code) + if exported: + exports.append(DomainExports(domain_path=domain, pipes=exported)) + return exports diff --git a/pipelex/core/packages/dependency_resolver.py b/pipelex/core/packages/dependency_resolver.py new file mode 100644 index 000000000..2a46eb555 --- /dev/null +++ b/pipelex/core/packages/dependency_resolver.py @@ -0,0 +1,590 @@ +# pyright: reportUnknownMemberType=false, reportUnknownVariableType=false, reportUnknownArgumentType=false +import tempfile +from pathlib import Path +from typing import Any + +from pydantic import BaseModel, ConfigDict + +from pipelex import log +from pipelex.core.packages.discovery import MANIFEST_FILENAME, find_package_manifest +from pipelex.core.packages.exceptions import ( + DependencyResolveError, + ManifestError, + PackageCacheError, + TransitiveDependencyError, + VCSFetchError, + VersionResolutionError, +) +from pipelex.core.packages.manifest import MthdsPackageManifest, PackageDependency +from pipelex.core.packages.package_cache import get_cached_package_path, is_cached, store_in_cache +from pipelex.core.packages.vcs_resolver import address_to_clone_url, clone_at_version, list_remote_version_tags, resolve_version_from_tags +from pipelex.tools.misc.semver import parse_constraint, parse_version, select_minimum_version_for_multiple_constraints, version_satisfies + + +class ResolvedDependency(BaseModel): + """A resolved local dependency with its manifest and file paths.""" + + model_config = ConfigDict(frozen=True) + + alias: str + address: str + manifest: MthdsPackageManifest | None + package_root: Path + mthds_files: list[Path] + exported_pipe_codes: set[str] | None + + +def collect_mthds_files(directory: Path) -> list[Path]: + """Collect all .mthds files under a directory recursively. + + Args: + directory: The directory to scan + + Returns: + List of .mthds file paths found + """ + return sorted(directory.rglob("*.mthds")) + + +def determine_exported_pipes(manifest: MthdsPackageManifest | None) -> set[str] | None: + """Determine which pipes are exported by a dependency. + + Returns None when all pipes should be public (no manifest, or manifest + without an ``[[exports]]`` section). Returns a set of pipe codes when + the manifest explicitly declares exports (the set may be empty if + export entries list no pipes, meaning only ``main_pipe`` is public). + + Args: + manifest: The dependency's manifest (if any) + + Returns: + None if all pipes are public, or the set of explicitly exported pipe codes. 
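The three cases, summarized as a sketch (manifest objects hypothetical):

# determine_exported_pipes(None)                      -> None (all pipes public)
# determine_exported_pipes(manifest_without_exports)  -> None (all pipes public)
# determine_exported_pipes(manifest_with_exports)     -> {"pipe_a", "pipe_b"} (only listed pipes are public; may be empty)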
+ """ + if manifest is None: + return None + + # No exports section in manifest -> all pipes are public + if not manifest.exports: + return None + + exported: set[str] = set() + for domain_export in manifest.exports: + exported.update(domain_export.pipes) + + # Auto-export main_pipe from bundles (scan for main_pipe in bundle headers) + # This is done at loading time by LibraryManager, not here + return exported + + +def resolve_local_dependencies( + manifest: MthdsPackageManifest, + package_root: Path, +) -> list[ResolvedDependency]: + """Resolve dependencies that have a local `path` field. + + For each dependency with a `path`, resolves the directory, finds the manifest + and .mthds files, and determines exported pipes. + + Args: + manifest: The consuming package's manifest + package_root: The root directory of the consuming package + + Returns: + List of resolved dependencies (only those with a `path` field) + + Raises: + DependencyResolveError: If a path does not exist or is not a directory + """ + resolved: list[ResolvedDependency] = [] + + for dep in manifest.dependencies: + if dep.path is None: + log.verbose(f"Dependency '{dep.alias}' has no local path, skipping local resolution") + continue + + dep_dir = (package_root / dep.path).resolve() + if not dep_dir.exists(): + msg = f"Dependency '{dep.alias}' local path '{dep.path}' resolves to '{dep_dir}' which does not exist" + raise DependencyResolveError(msg) + if not dep_dir.is_dir(): + msg = f"Dependency '{dep.alias}' local path '{dep.path}' resolves to '{dep_dir}' which is not a directory" + raise DependencyResolveError(msg) + + # Find the dependency's manifest + dep_manifest: MthdsPackageManifest | None = None + dep_manifest_path = dep_dir / MANIFEST_FILENAME + if dep_manifest_path.is_file(): + try: + dep_manifest = find_package_manifest(dep_manifest_path) + except ManifestError as exc: + log.warning(f"Could not parse METHODS.toml for dependency '{dep.alias}': {exc.message}") + + # Collect .mthds files + mthds_files = collect_mthds_files(dep_dir) + + # Determine exported pipes + exported_pipe_codes = determine_exported_pipes(dep_manifest) + + resolved.append( + ResolvedDependency( + alias=dep.alias, + address=dep.address, + manifest=dep_manifest, + package_root=dep_dir, + mthds_files=mthds_files, + exported_pipe_codes=exported_pipe_codes, + ) + ) + export_count = len(exported_pipe_codes) if exported_pipe_codes is not None else "all" + log.verbose(f"Resolved dependency '{dep.alias}': {len(mthds_files)} .mthds files, {export_count} exported pipes") + + return resolved + + +def _find_manifest_in_dir(directory: Path) -> MthdsPackageManifest | None: + """Read and parse a METHODS.toml from a directory root. + + Args: + directory: The directory to look for METHODS.toml in. + + Returns: + The parsed manifest, or None if absent or unparseable. + """ + manifest_path = directory / MANIFEST_FILENAME + if not manifest_path.is_file(): + return None + try: + return find_package_manifest(manifest_path) + except ManifestError as exc: + log.warning(f"Could not parse METHODS.toml in '{directory}': {exc.message}") + return None + + +def _resolve_local_dependency( + dep: PackageDependency, + package_root: Path, +) -> ResolvedDependency: + """Resolve a single dependency that has a local path. + + Args: + dep: The dependency with a non-None ``path`` field. + package_root: The consuming package root. + + Returns: + The resolved dependency. + + Raises: + DependencyResolveError: If the path does not exist or is not a directory. 
+ """ + local_path: str = dep.path # type: ignore[assignment] + dep_dir = (package_root / local_path).resolve() + if not dep_dir.exists(): + msg = f"Dependency '{dep.alias}' local path '{local_path}' resolves to '{dep_dir}' which does not exist" + raise DependencyResolveError(msg) + if not dep_dir.is_dir(): + msg = f"Dependency '{dep.alias}' local path '{local_path}' resolves to '{dep_dir}' which is not a directory" + raise DependencyResolveError(msg) + + dep_manifest = _find_manifest_in_dir(dep_dir) + mthds_files = collect_mthds_files(dep_dir) + exported_pipe_codes = determine_exported_pipes(dep_manifest) + + return ResolvedDependency( + alias=dep.alias, + address=dep.address, + manifest=dep_manifest, + package_root=dep_dir, + mthds_files=mthds_files, + exported_pipe_codes=exported_pipe_codes, + ) + + +def resolve_remote_dependency( + dep: PackageDependency, + cache_root: Path | None = None, + fetch_url_override: str | None = None, +) -> ResolvedDependency: + """Resolve a single dependency via VCS fetch (with cache). + + Orchestrates: get clone URL -> list remote tags -> MVS version selection -> + check cache -> clone if miss -> build ResolvedDependency. + + Args: + dep: The dependency to resolve (no ``path`` field). + cache_root: Override for the package cache root directory. + fetch_url_override: Override clone URL (e.g. ``file://`` for tests). + + Returns: + The resolved dependency. + + Raises: + DependencyResolveError: If fetching or version resolution fails. + """ + clone_url = fetch_url_override or address_to_clone_url(dep.address) + + # List remote tags and select version + try: + version_tags = list_remote_version_tags(clone_url) + selected_version, selected_tag = resolve_version_from_tags(version_tags, dep.version) + except (VCSFetchError, VersionResolutionError) as exc: + msg = f"Failed to resolve remote dependency '{dep.alias}' ({dep.address}): {exc}" + raise DependencyResolveError(msg) from exc + + version_str = str(selected_version) + + # Check cache + if is_cached(dep.address, version_str, cache_root): + cached_path = get_cached_package_path(dep.address, version_str, cache_root) + log.verbose(f"Dependency '{dep.alias}' ({dep.address}@{version_str}) found in cache") + return _build_resolved_from_dir(dep.alias, dep.address, cached_path) + + # Clone and cache + try: + with tempfile.TemporaryDirectory(prefix="mthds_clone_") as tmp_dir: + clone_dest = Path(tmp_dir) / "pkg" + clone_at_version(clone_url, selected_tag, clone_dest) + cached_path = store_in_cache(clone_dest, dep.address, version_str, cache_root) + except (VCSFetchError, PackageCacheError) as exc: + msg = f"Failed to fetch/cache dependency '{dep.alias}' ({dep.address}@{version_str}): {exc}" + raise DependencyResolveError(msg) from exc + + log.verbose(f"Dependency '{dep.alias}' ({dep.address}@{version_str}) fetched and cached") + return _build_resolved_from_dir(dep.alias, dep.address, cached_path) + + +def _build_resolved_from_dir(alias: str, address: str, directory: Path) -> ResolvedDependency: + """Build a ResolvedDependency from a package directory. + + Args: + alias: The dependency alias. + address: The package address. + directory: The package directory (local or cached). + + Returns: + The resolved dependency. 
+ """ + dep_manifest = _find_manifest_in_dir(directory) + mthds_files = collect_mthds_files(directory) + exported_pipe_codes = determine_exported_pipes(dep_manifest) + + return ResolvedDependency( + alias=alias, + address=address, + manifest=dep_manifest, + package_root=directory, + mthds_files=mthds_files, + exported_pipe_codes=exported_pipe_codes, + ) + + +def _resolve_with_multiple_constraints( + address: str, + alias: str, + constraints: list[str], + tags_cache: dict[str, list[tuple[Any, str]]], + cache_root: Path | None, + fetch_url_override: str | None, +) -> ResolvedDependency: + """Resolve a dependency that has multiple version constraints (diamond). + + Gets/caches the remote tag list, parses all constraints, and selects the + minimum version satisfying all of them simultaneously. + + Args: + address: The package address. + alias: The dependency alias. + constraints: All version constraint strings from different dependents. + tags_cache: Shared cache of address -> tag list. + cache_root: Override for the package cache root. + fetch_url_override: Override clone URL (for tests). + + Returns: + The resolved dependency. + + Raises: + TransitiveDependencyError: If no version satisfies all constraints. + DependencyResolveError: If VCS operations fail. + """ + clone_url = fetch_url_override or address_to_clone_url(address) + + # Get or cache tag list + if address not in tags_cache: + try: + tags_cache[address] = list_remote_version_tags(clone_url) + except VCSFetchError as exc: + msg = f"Failed to list tags for '{address}': {exc}" + raise DependencyResolveError(msg) from exc + + version_tags = tags_cache[address] + versions = [entry[0] for entry in version_tags] + + # Parse all constraints and find a version satisfying all + parsed_constraints = [parse_constraint(constraint) for constraint in constraints] + selected = select_minimum_version_for_multiple_constraints(versions, parsed_constraints) + + if selected is None: + constraints_str = ", ".join(constraints) + msg = f"No version of '{address}' satisfies all constraints: {constraints_str}" + raise TransitiveDependencyError(msg) + + version_str = str(selected) + + # Check cache + if is_cached(address, version_str, cache_root): + cached_path = get_cached_package_path(address, version_str, cache_root) + log.verbose(f"Diamond dep '{alias}' ({address}@{version_str}) found in cache") + return _build_resolved_from_dir(alias, address, cached_path) + + # Find the corresponding tag name + selected_tag: str | None = None + for ver, tag_name in version_tags: + if ver == selected: + selected_tag = tag_name + break + + if selected_tag is None: + msg = f"Internal error: selected version {selected} not found in tag list for '{address}'" + raise DependencyResolveError(msg) + + # Clone and cache + try: + with tempfile.TemporaryDirectory(prefix="mthds_clone_") as tmp_dir: + clone_dest = Path(tmp_dir) / "pkg" + clone_at_version(clone_url, selected_tag, clone_dest) + cached_path = store_in_cache(clone_dest, address, version_str, cache_root) + except (VCSFetchError, PackageCacheError) as exc: + msg = f"Failed to fetch/cache '{address}@{version_str}': {exc}" + raise DependencyResolveError(msg) from exc + + log.verbose(f"Diamond dep '{alias}' ({address}@{version_str}) fetched and cached") + return _build_resolved_from_dir(alias, address, cached_path) + + +def _remove_stale_subdep_constraints( + old_manifest: MthdsPackageManifest | None, + resolved_map: dict[str, ResolvedDependency], + constraints_by_address: dict[str, list[str]], +) -> None: + """Remove 
constraints that were contributed by a dependency version being replaced. + + When a diamond re-resolution picks a new version, the OLD version's sub-dependencies + may have added constraints to ``constraints_by_address``. Those constraints are stale + because the old version is no longer active. This function recursively removes them. + + Args: + old_manifest: The manifest of the dependency version being replaced. + resolved_map: Address -> resolved dependency (entries may be removed). + constraints_by_address: Address -> list of version constraints (entries may be pruned). + """ + if old_manifest is None or not old_manifest.dependencies: + return + + for old_sub in old_manifest.dependencies: + if old_sub.path is not None: + continue + constraints_list = constraints_by_address.get(old_sub.address) + if constraints_list is None: + continue + # Remove the specific constraint string that the old sub-dep contributed + try: + constraints_list.remove(old_sub.version) + except ValueError: + continue + # If no constraints remain, the dep was only needed by the old version + if not constraints_list: + del constraints_by_address[old_sub.address] + old_resolved_sub = resolved_map.pop(old_sub.address, None) + if old_resolved_sub is not None: + # Recursively clean up the removed dep's own sub-dep contributions + _remove_stale_subdep_constraints(old_resolved_sub.manifest, resolved_map, constraints_by_address) + + +def _resolve_transitive_tree( + deps: list[PackageDependency], + resolution_stack: set[str], + resolved_map: dict[str, ResolvedDependency], + constraints_by_address: dict[str, list[str]], + tags_cache: dict[str, list[tuple[Any, str]]], + cache_root: Path | None, + fetch_url_overrides: dict[str, str] | None, +) -> None: + """Recursively resolve remote dependencies with cycle detection and diamond handling. + + Uses DFS with a stack set for cycle detection. Diamond dependencies (same address + reached via multiple paths) are resolved by finding a version satisfying all constraints. + + Args: + deps: Dependencies to resolve at this level. + resolution_stack: Addresses currently on the DFS path (cycle detection). + resolved_map: Address -> resolved dependency (deduplication). + constraints_by_address: Address -> list of version constraints seen. + tags_cache: Address -> cached tag list (avoid repeated git ls-remote). + cache_root: Override for the package cache root. + fetch_url_overrides: Map of address to override clone URL (for tests). + + Raises: + TransitiveDependencyError: If a cycle is detected or diamond constraints are unsatisfiable. + DependencyResolveError: If resolution fails. 
+ """ + for dep in deps: + # Skip local path deps in transitive resolution + if dep.path is not None: + continue + + # Cycle detection + if dep.address in resolution_stack: + msg = f"Dependency cycle detected: '{dep.address}' is already on the resolution stack" + raise TransitiveDependencyError(msg) + + # Track constraint + if dep.address not in constraints_by_address: + constraints_by_address[dep.address] = [] + constraints_by_address[dep.address].append(dep.version) + + # Already resolved β€” check if existing version satisfies new constraint + if dep.address in resolved_map: + existing = resolved_map[dep.address] + if existing.manifest is not None: + existing_constraint = parse_constraint(dep.version) + existing_ver = parse_version(existing.manifest.version) + if version_satisfies(existing_ver, existing_constraint): + log.verbose(f"Transitive dep '{dep.address}' already resolved at {existing.manifest.version}, satisfies '{dep.version}'") + continue + + # Diamond: remove stale constraints from the old version's sub-deps + # before re-resolving, so they don't cause false conflicts + _remove_stale_subdep_constraints(existing.manifest, resolved_map, constraints_by_address) + + # Diamond: re-resolve with all constraints + override_url = (fetch_url_overrides or {}).get(dep.address) + re_resolved = _resolve_with_multiple_constraints( + address=dep.address, + alias=dep.alias, + constraints=constraints_by_address[dep.address], + tags_cache=tags_cache, + cache_root=cache_root, + fetch_url_override=override_url, + ) + resolved_map[dep.address] = re_resolved + + # Recurse into sub-dependencies of the re-resolved version, + # which may differ from the previously resolved version + if re_resolved.manifest is not None and re_resolved.manifest.dependencies: + remote_sub_deps = [sub for sub in re_resolved.manifest.dependencies if sub.path is None] + if remote_sub_deps: + resolution_stack.add(dep.address) + try: + _resolve_transitive_tree( + deps=remote_sub_deps, + resolution_stack=resolution_stack, + resolved_map=resolved_map, + constraints_by_address=constraints_by_address, + tags_cache=tags_cache, + cache_root=cache_root, + fetch_url_overrides=fetch_url_overrides, + ) + finally: + resolution_stack.discard(dep.address) + continue + + # Normal resolve + resolution_stack.add(dep.address) + try: + override_url = (fetch_url_overrides or {}).get(dep.address) + + # Check if multiple constraints already (shouldn't happen on first visit, but defensive) + if len(constraints_by_address[dep.address]) > 1: + resolved_dep = _resolve_with_multiple_constraints( + address=dep.address, + alias=dep.alias, + constraints=constraints_by_address[dep.address], + tags_cache=tags_cache, + cache_root=cache_root, + fetch_url_override=override_url, + ) + else: + resolved_dep = resolve_remote_dependency(dep, cache_root=cache_root, fetch_url_override=override_url) + + resolved_map[dep.address] = resolved_dep + + # Recurse into sub-dependencies (remote only) + if resolved_dep.manifest is not None and resolved_dep.manifest.dependencies: + remote_sub_deps = [sub for sub in resolved_dep.manifest.dependencies if sub.path is None] + if remote_sub_deps: + _resolve_transitive_tree( + deps=remote_sub_deps, + resolution_stack=resolution_stack, + resolved_map=resolved_map, + constraints_by_address=constraints_by_address, + tags_cache=tags_cache, + cache_root=cache_root, + fetch_url_overrides=fetch_url_overrides, + ) + finally: + resolution_stack.discard(dep.address) + + +def resolve_all_dependencies( + manifest: MthdsPackageManifest, + 
package_root: Path, + cache_root: Path | None = None, + fetch_url_overrides: dict[str, str] | None = None, +) -> list[ResolvedDependency]: + """Resolve all dependencies with transitive resolution for remote deps. + + Local path dependencies are resolved directly (no recursion into their sub-deps). + Remote dependencies are resolved transitively with cycle detection and diamond + constraint handling. + + Args: + manifest: The consuming package's manifest. + package_root: The root directory of the consuming package. + cache_root: Override for the package cache root. + fetch_url_overrides: Map of ``address`` to override clone URL (for tests). + + Returns: + List of resolved dependencies (local + all transitive remote). + + Raises: + DependencyResolveError: If any dependency fails to resolve. + TransitiveDependencyError: If cycles or unsatisfiable diamonds are found. + """ + # 1. Resolve local path deps (direct only, no recursion) + local_resolved: list[ResolvedDependency] = [] + remote_deps: list[PackageDependency] = [] + + for dep in manifest.dependencies: + if dep.path is not None: + resolved_dep = _resolve_local_dependency(dep, package_root) + local_resolved.append(resolved_dep) + local_export_count = len(resolved_dep.exported_pipe_codes) if resolved_dep.exported_pipe_codes is not None else "all" + log.verbose( + f"Resolved local dependency '{resolved_dep.alias}': {len(resolved_dep.mthds_files)} .mthds files, {local_export_count} exported pipes" + ) + else: + remote_deps.append(dep) + + # 2. Resolve remote deps transitively + resolved_map: dict[str, ResolvedDependency] = {} + constraints_by_address: dict[str, list[str]] = {} + tags_cache: dict[str, list[tuple[Any, str]]] = {} + resolution_stack: set[str] = set() + + if remote_deps: + _resolve_transitive_tree( + deps=remote_deps, + resolution_stack=resolution_stack, + resolved_map=resolved_map, + constraints_by_address=constraints_by_address, + tags_cache=tags_cache, + cache_root=cache_root, + fetch_url_overrides=fetch_url_overrides, + ) + + for resolved_dep in resolved_map.values(): + remote_export_count = len(resolved_dep.exported_pipe_codes) if resolved_dep.exported_pipe_codes is not None else "all" + log.verbose( + f"Resolved remote dependency '{resolved_dep.alias}': {len(resolved_dep.mthds_files)} .mthds files, {remote_export_count} exported pipes" + ) + + return local_resolved + list(resolved_map.values()) diff --git a/pipelex/core/packages/discovery.py b/pipelex/core/packages/discovery.py new file mode 100644 index 000000000..9d832c456 --- /dev/null +++ b/pipelex/core/packages/discovery.py @@ -0,0 +1,43 @@ +from pathlib import Path + +from pipelex.core.packages.manifest import MthdsPackageManifest +from pipelex.core.packages.manifest_parser import parse_methods_toml + +MANIFEST_FILENAME = "METHODS.toml" + + +def find_package_manifest(bundle_path: Path) -> MthdsPackageManifest | None: + """Walk up from a bundle file's directory to find the nearest METHODS.toml. + + Stops at the first METHODS.toml found, or when a .git/ directory is + encountered, or at the filesystem root. 
+ + Args: + bundle_path: Path to a .mthds bundle file + + Returns: + The parsed MthdsPackageManifest, or None if no manifest is found + + Raises: + ManifestParseError: If a METHODS.toml is found but has invalid TOML syntax + ManifestValidationError: If a METHODS.toml is found but fails validation + """ + current = bundle_path.parent.resolve() + + while True: + manifest_path = current / MANIFEST_FILENAME + if manifest_path.is_file(): + content = manifest_path.read_text(encoding="utf-8") + return parse_methods_toml(content) + + # Stop at .git boundary + git_dir = current / ".git" + if git_dir.exists(): + return None + + # Stop at filesystem root + parent = current.parent + if parent == current: + return None + + current = parent diff --git a/pipelex/core/packages/exceptions.py b/pipelex/core/packages/exceptions.py new file mode 100644 index 000000000..a2a314004 --- /dev/null +++ b/pipelex/core/packages/exceptions.py @@ -0,0 +1,53 @@ +from pipelex.base_exceptions import PipelexError + + +class ManifestError(PipelexError): + pass + + +class ManifestParseError(ManifestError): + pass + + +class ManifestValidationError(ManifestError): + pass + + +class VCSFetchError(PipelexError): + """Raised when a git clone or tag listing operation fails.""" + + +class VersionResolutionError(PipelexError): + """Raised when no version satisfying the constraint can be found in remote tags.""" + + +class PackageCacheError(PipelexError): + """Raised when cache operations (lookup, store) fail.""" + + +class LockFileError(PipelexError): + """Raised when lock file parsing, generation, or I/O fails.""" + + +class IntegrityError(PipelexError): + """Raised when a cached package does not match its lock file hash.""" + + +class DependencyResolveError(PipelexError): + """Raised when a dependency cannot be resolved.""" + + +class TransitiveDependencyError(PipelexError): + """Raised for cycles or unsatisfiable diamond constraints in transitive resolution.""" + + +class IndexBuildError(PipelexError): + """Raised when building a package index entry fails.""" + + +class GraphBuildError(PipelexError): + """Raised when building the know-how graph fails.""" + + +class PublishValidationError(PipelexError): + """Raised when publish validation encounters an unrecoverable error.""" diff --git a/pipelex/core/packages/graph/__init__.py b/pipelex/core/packages/graph/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/pipelex/core/packages/graph/chain_formatter.py b/pipelex/core/packages/graph/chain_formatter.py new file mode 100644 index 000000000..4e4ae6b57 --- /dev/null +++ b/pipelex/core/packages/graph/chain_formatter.py @@ -0,0 +1,97 @@ +"""Format pipe chains as human-readable MTHDS composition templates. + +Provides a formatter that takes a resolved pipe chain and produces +a multi-line snippet showing how to wire the pipes together. +""" + +from pipelex.core.packages.graph.models import ConceptId, PipeNode + + +def format_chain_as_mthds_snippet( + chain_pipes: list[PipeNode], + from_concept: ConceptId, + to_concept: ConceptId, +) -> str: + """Format a chain of PipeNodes as a human-readable MTHDS composition template. + + Args: + chain_pipes: Resolved PipeNode list representing the chain steps. + from_concept: The source ConceptId (what the user has). + to_concept: The target ConceptId (what the user needs). + + Returns: + Multi-line string with the composition template. + Empty string if chain_pipes is empty. 
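+
+    Example:
+        Illustrative output for a single-step chain (hypothetical names)::
+
+            Composition: native.Text -> legal.ClauseSummary
+
+              Step 1: summarize_clause
+                Package: github.com/acme/legal
+                Domain: legal
+                Input: text: native.Text
+                Output: legal.ClauseSummary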
+ """ + if not chain_pipes: + return "" + + lines: list[str] = [] + + # Header: Composition: from -> intermediate(s) -> to + header_refs: list[str] = [_format_concept_ref(from_concept)] + for pipe_node in chain_pipes[:-1]: + header_refs.append(_format_concept_ref(pipe_node.output_concept_id)) + header_refs.append(_format_concept_ref(to_concept)) + lines.append(f"Composition: {' -> '.join(header_refs)}") + + # Steps + for step_number, pipe_node in enumerate(chain_pipes, start=1): + lines.append("") + lines.append(_format_step(step_number, pipe_node)) + + # Cross-package note + if _is_cross_package_chain(chain_pipes): + lines.append("") + lines.append( + "Note: This chain spans multiple packages. Use alias->domain.pipe_code\nsyntax for cross-package references in your .mthds file." + ) + + return "\n".join(lines) + + +def _format_concept_ref(concept_id: ConceptId) -> str: + """Return the concept_ref as-is for display. + + Args: + concept_id: The concept to format. + + Returns: + The concept_ref string (e.g. 'native.Text', 'pkg_test_legal.PkgTestContractClause'). + """ + return concept_id.concept_ref + + +def _format_step(step_number: int, pipe_node: PipeNode) -> str: + """Format one numbered step block. + + Args: + step_number: The 1-based step number. + pipe_node: The PipeNode for this step. + + Returns: + Multi-line string for the step block. + """ + inputs_str = ", ".join(f"{param_name}: {_format_concept_ref(concept_id)}" for param_name, concept_id in pipe_node.input_concept_ids.items()) + + step_lines = [ + f" Step {step_number}: {pipe_node.pipe_code}", + f" Package: {pipe_node.package_address}", + f" Domain: {pipe_node.domain_code}", + f" Input: {inputs_str}", + f" Output: {_format_concept_ref(pipe_node.output_concept_id)}", + ] + return "\n".join(step_lines) + + +def _is_cross_package_chain(chain_pipes: list[PipeNode]) -> bool: + """Check if a chain spans multiple packages. + + Args: + chain_pipes: The list of PipeNodes in the chain. + + Returns: + True if pipes come from more than one package_address. + """ + addresses = {pipe_node.package_address for pipe_node in chain_pipes} + return len(addresses) > 1 diff --git a/pipelex/core/packages/graph/graph_builder.py b/pipelex/core/packages/graph/graph_builder.py new file mode 100644 index 000000000..2a35e5c5a --- /dev/null +++ b/pipelex/core/packages/graph/graph_builder.py @@ -0,0 +1,393 @@ +"""Build a KnowHowGraph from a PackageIndex. + +Resolves concept identities, builds pipe nodes with resolved input/output concepts, +and creates data-flow and refinement edges. +""" + +from pipelex import log +from pipelex.core.concepts.native.concept_native import NativeConceptCode +from pipelex.core.packages.graph.models import ( + NATIVE_PACKAGE_ADDRESS, + ConceptId, + ConceptNode, + EdgeKind, + GraphEdge, + KnowHowGraph, + PipeNode, +) +from pipelex.core.packages.index.models import PackageIndex +from pipelex.core.qualified_ref import QualifiedRef, QualifiedRefError + + +def build_know_how_graph(index: PackageIndex) -> KnowHowGraph: + """Build a KnowHowGraph from a PackageIndex. + + Args: + index: The package index to build the graph from + + Returns: + A fully populated KnowHowGraph with concept nodes, pipe nodes, + refinement edges, and data-flow edges + + Note: + Unresolvable concepts and refines targets are logged as warnings + and excluded from the graph rather than raising errors. 
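+
+    Example:
+        A minimal sketch, assuming ``index`` was already built elsewhere::
+
+            graph = build_know_how_graph(index)
+            print(len(graph.pipe_nodes), len(graph.concept_nodes))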
+ """ + graph = KnowHowGraph() + + # Step 1: Build concept nodes + lookup table + package_concept_lookup: dict[str, dict[str, ConceptId]] = {} + _build_concept_nodes(index, graph, package_concept_lookup) + _build_native_concept_nodes(graph) + + # Step 2: Resolve refines targets + _resolve_refines_targets(index, graph, package_concept_lookup) + + # Step 3: Build pipe nodes + _build_pipe_nodes(index, graph, package_concept_lookup) + + # Step 4: Build refinement edges + _build_refinement_edges(graph) + + # Step 5: Build data flow edges + _build_data_flow_edges(graph) + + return graph + + +def _build_concept_nodes( + index: PackageIndex, + graph: KnowHowGraph, + package_concept_lookup: dict[str, dict[str, ConceptId]], +) -> None: + """Create ConceptNodes for all concepts in all packages and populate the lookup table.""" + for address, concept_entry in index.all_concepts(): + concept_id = ConceptId( + package_address=address, + concept_ref=concept_entry.concept_ref, + ) + node = ConceptNode( + concept_id=concept_id, + description=concept_entry.description, + structure_fields=list(concept_entry.structure_fields), + ) + graph.concept_nodes[concept_id.node_key] = node + + if address not in package_concept_lookup: + package_concept_lookup[address] = {} + package_concept_lookup[address][concept_entry.concept_ref] = concept_id + + +def _build_native_concept_nodes(graph: KnowHowGraph) -> None: + """Create ConceptNodes for all native concepts.""" + for native_code in NativeConceptCode: + concept_ref = f"native.{native_code}" + concept_id = ConceptId( + package_address=NATIVE_PACKAGE_ADDRESS, + concept_ref=concept_ref, + ) + if concept_id.node_key not in graph.concept_nodes: + node = ConceptNode( + concept_id=concept_id, + description=f"Native concept: {native_code}", + ) + graph.concept_nodes[concept_id.node_key] = node + + +def _resolve_refines_targets( + index: PackageIndex, + graph: KnowHowGraph, + package_concept_lookup: dict[str, dict[str, ConceptId]], +) -> None: + """Resolve refines strings to ConceptIds and update ConceptNodes.""" + for address, concept_entry in index.all_concepts(): + if concept_entry.refines is None: + continue + + concept_id = ConceptId( + package_address=address, + concept_ref=concept_entry.concept_ref, + ) + existing_node = graph.concept_nodes.get(concept_id.node_key) + if existing_node is None: + continue + + refines_target = _resolve_refines_string( + refines=concept_entry.refines, + package_address=address, + index=index, + package_concept_lookup=package_concept_lookup, + ) + if refines_target is None: + log.warning(f"Could not resolve refines target '{concept_entry.refines}' for concept {concept_id.node_key}") + continue + + # Replace the node with one that has the resolved refines link + updated_node = ConceptNode( + concept_id=existing_node.concept_id, + description=existing_node.description, + refines=refines_target, + structure_fields=list(existing_node.structure_fields), + ) + graph.concept_nodes[concept_id.node_key] = updated_node + + +def _resolve_refines_string( + refines: str, + package_address: str, + index: PackageIndex, + package_concept_lookup: dict[str, dict[str, ConceptId]], +) -> ConceptId | None: + """Resolve a refines string to a ConceptId. + + Handles cross-package refs (alias->domain.Code) and local refs. 
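+
+    For example, ``"legal.Clause"`` resolves within the same package, while
+    ``"shared->legal.Clause"`` (hypothetical alias) resolves through the
+    package's dependency aliases.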
+ """ + if QualifiedRef.has_cross_package_prefix(refines): + alias, remainder = QualifiedRef.split_cross_package_ref(refines) + entry = index.get_entry(package_address) + if entry is None: + return None + resolved_address = entry.dependency_aliases.get(alias) + if resolved_address is None: + log.warning(f"Unknown dependency alias '{alias}' in refines '{refines}' for package {package_address}") + return None + return ConceptId( + package_address=resolved_address, + concept_ref=remainder, + ) + + # Local reference: look up in same package + local_lookup = package_concept_lookup.get(package_address, {}) + # Try as a concept_ref (domain-qualified) key + if refines in local_lookup: + return local_lookup[refines] + # Fall back to bare concept code match + for concept_id in local_lookup.values(): + if concept_id.concept_code == refines: + return concept_id + return None + + +def _resolve_concept_code( + concept_spec: str, + package_address: str, + domain_code: str, + package_concept_lookup: dict[str, dict[str, ConceptId]], + index: PackageIndex, +) -> ConceptId | None: + """Resolve a concept spec string (from pipe input/output) to a ConceptId. + + Handles native concepts, bare concept codes, domain-qualified refs + (e.g. ``domain.ConceptCode``), and cross-package refs + (e.g. ``alias->domain.ConceptCode``). + + Args: + concept_spec: The concept spec string (e.g. "Text", "PkgTestContractClause", + "domain.ConceptCode", "alias->domain.ConceptCode") + package_address: The package address containing the pipe + domain_code: The domain code of the pipe + package_concept_lookup: The package->code->ConceptId lookup table + index: The package index (needed for cross-package alias resolution) + + Returns: + A resolved ConceptId, or None if the concept could not be resolved + """ + # Check if it's a native concept + if NativeConceptCode.is_native_concept_ref_or_code(concept_spec): + native_ref = NativeConceptCode.get_validated_native_concept_ref(concept_spec) + return ConceptId( + package_address=NATIVE_PACKAGE_ADDRESS, + concept_ref=native_ref, + ) + + # Cross-package ref: alias->domain.ConceptCode + if QualifiedRef.has_cross_package_prefix(concept_spec): + return _resolve_cross_package_concept(concept_spec, package_address, index, package_concept_lookup) + + # Look up in same package β€” try as concept_ref (domain-qualified) key first + local_lookup = package_concept_lookup.get(package_address, {}) + if concept_spec in local_lookup: + return local_lookup[concept_spec] + + # Fall back to bare concept code match + for concept_id in local_lookup.values(): + if concept_id.concept_code == concept_spec: + return concept_id + + # Unresolved: log warning and return None to exclude from the graph + log.warning(f"Could not resolve concept '{concept_spec}' in package {package_address}, domain {domain_code}") + return None + + +def _resolve_cross_package_concept( + concept_spec: str, + package_address: str, + index: PackageIndex, + package_concept_lookup: dict[str, dict[str, ConceptId]], +) -> ConceptId | None: + """Resolve a cross-package concept spec (alias->domain.ConceptCode) to a ConceptId. + + Args: + concept_spec: The cross-package concept spec (e.g. 
"scoring_dep->pkg_test_scoring.Score") + package_address: The address of the package containing the reference + index: The package index for alias resolution + package_concept_lookup: The package->code->ConceptId lookup table + + Returns: + A resolved ConceptId, or None if the alias or concept could not be resolved + """ + alias, remainder = QualifiedRef.split_cross_package_ref(concept_spec) + entry = index.get_entry(package_address) + if entry is None: + log.warning(f"Package '{package_address}' not found in index for cross-package ref '{concept_spec}'") + return None + + resolved_address = entry.dependency_aliases.get(alias) + if resolved_address is None: + log.warning(f"Unknown dependency alias '{alias}' in concept spec '{concept_spec}' for package {package_address}") + return None + + target_lookup = package_concept_lookup.get(resolved_address, {}) + + # Try by full concept_ref (remainder is domain.ConceptCode) + if remainder in target_lookup: + return target_lookup[remainder] + + # Fall back to bare concept code (last segment of remainder) + try: + ref = QualifiedRef.parse(remainder) + except QualifiedRefError: + log.warning(f"Malformed cross-package concept spec '{concept_spec}': remainder '{remainder}' is not a valid reference") + return None + for concept_id in target_lookup.values(): + if concept_id.concept_code == ref.local_code: + return concept_id + + log.warning(f"Could not resolve cross-package concept '{concept_spec}' in target package {resolved_address}") + return None + + +def _build_pipe_nodes( + index: PackageIndex, + graph: KnowHowGraph, + package_concept_lookup: dict[str, dict[str, ConceptId]], +) -> None: + """Create PipeNodes with resolved concept identities. + + Pipes with unresolvable output or input concepts are excluded from the + graph rather than creating dangling references. 
+ """ + for address, pipe_sig in index.all_pipes(): + output_concept_id = _resolve_concept_code( + concept_spec=pipe_sig.output_spec, + package_address=address, + domain_code=pipe_sig.domain_code, + package_concept_lookup=package_concept_lookup, + index=index, + ) + if output_concept_id is None: + log.warning(f"Excluding pipe '{pipe_sig.pipe_code}' from graph: unresolvable output concept '{pipe_sig.output_spec}'") + continue + + input_concept_ids: dict[str, ConceptId] = {} + has_unresolvable_input = False + for param_name, input_spec in pipe_sig.input_specs.items(): + resolved_input = _resolve_concept_code( + concept_spec=input_spec, + package_address=address, + domain_code=pipe_sig.domain_code, + package_concept_lookup=package_concept_lookup, + index=index, + ) + if resolved_input is None: + log.warning(f"Excluding pipe '{pipe_sig.pipe_code}' from graph: unresolvable input concept '{input_spec}' for param '{param_name}'") + has_unresolvable_input = True + break + input_concept_ids[param_name] = resolved_input + + if has_unresolvable_input: + continue + + pipe_node = PipeNode( + package_address=address, + pipe_code=pipe_sig.pipe_code, + pipe_type=pipe_sig.pipe_type, + domain_code=pipe_sig.domain_code, + description=pipe_sig.description, + is_exported=pipe_sig.is_exported, + input_concept_ids=input_concept_ids, + output_concept_id=output_concept_id, + ) + graph.pipe_nodes[pipe_node.node_key] = pipe_node + + +def _build_refinement_edges(graph: KnowHowGraph) -> None: + """Create REFINEMENT edges for each concept that refines another.""" + for concept_node in graph.concept_nodes.values(): + if concept_node.refines is not None: + edge = GraphEdge( + kind=EdgeKind.REFINEMENT, + source_concept_id=concept_node.concept_id, + target_concept_id=concept_node.refines, + ) + graph.refinement_edges.append(edge) + + +def _build_data_flow_edges(graph: KnowHowGraph) -> None: + """Build data flow edges connecting pipes whose outputs feed other pipes' inputs. + + A pipe's output is compatible with another pipe's input if: + - The output concept is exactly the input concept, OR + - The output concept is a refinement (descendant) of the input concept + """ + # Build a reverse index: concept_node_key -> list of pipe_keys that produce it + producers_by_concept: dict[str, list[str]] = {} + + for pipe_key, pipe_node in graph.pipe_nodes.items(): + # Walk up the refinement chain from output concept, collecting all ancestor keys + ancestor_keys = _collect_refinement_ancestors(pipe_node.output_concept_id, graph) + for ancestor_key in ancestor_keys: + if ancestor_key not in producers_by_concept: + producers_by_concept[ancestor_key] = [] + producers_by_concept[ancestor_key].append(pipe_key) + + # For each pipe's each input, look up compatible producers + for target_key, target_pipe in graph.pipe_nodes.items(): + for param_name, input_concept_id in target_pipe.input_concept_ids.items(): + producer_keys = producers_by_concept.get(input_concept_id.node_key, []) + for source_key in producer_keys: + if source_key == target_key: + continue # Skip self-loops + edge = GraphEdge( + kind=EdgeKind.DATA_FLOW, + source_pipe_key=source_key, + target_pipe_key=target_key, + input_param=param_name, + ) + graph.data_flow_edges.append(edge) + + +def _collect_refinement_ancestors(concept_id: ConceptId, graph: KnowHowGraph) -> list[str]: + """Walk up the refinement chain from a concept, collecting all ancestor node_keys. + + Returns the concept itself plus all its ancestors via refines links. 
+ Used for data flow: if A refines B, then a producer of A can also + satisfy inputs expecting B. + """ + result: list[str] = [] + visited: set[str] = set() + current: ConceptId | None = concept_id + + while current is not None: + node_key = current.node_key + if node_key in visited: + break # Cycle detection + visited.add(node_key) + result.append(node_key) + + concept_node = graph.concept_nodes.get(node_key) + if concept_node is None: + break + current = concept_node.refines + + return result diff --git a/pipelex/core/packages/graph/models.py b/pipelex/core/packages/graph/models.py new file mode 100644 index 000000000..476ccecd1 --- /dev/null +++ b/pipelex/core/packages/graph/models.py @@ -0,0 +1,121 @@ +"""Data models for the know-how graph: concepts, pipes, edges, and the graph container.""" + +from pydantic import BaseModel, ConfigDict, Field + +from pipelex.tools.typing.pydantic_utils import empty_list_factory_of +from pipelex.types import StrEnum + +NATIVE_PACKAGE_ADDRESS = "__native__" + + +class ConceptId(BaseModel): + """Unique concept identity across the ecosystem. + + Combines a package address with a domain-qualified concept reference + to uniquely identify concepts even when different packages define + concepts with the same code. + """ + + model_config = ConfigDict(frozen=True, extra="forbid") + + package_address: str + concept_ref: str + + @property + def node_key(self) -> str: + return f"{self.package_address}::{self.concept_ref}" + + @property + def concept_code(self) -> str: + """Last segment of the concept_ref (split on '.').""" + return self.concept_ref.rsplit(".", maxsplit=1)[-1] + + @property + def is_native(self) -> bool: + return self.package_address == NATIVE_PACKAGE_ADDRESS + + +class EdgeKind(StrEnum): + DATA_FLOW = "data_flow" + REFINEMENT = "refinement" + + +class PipeNode(BaseModel): + """A pipe in the graph with resolved concept identities.""" + + model_config = ConfigDict(frozen=True, extra="forbid") + + package_address: str + pipe_code: str + pipe_type: str + domain_code: str + description: str + is_exported: bool + input_concept_ids: dict[str, ConceptId] = Field(default_factory=dict) + output_concept_id: ConceptId + + @property + def node_key(self) -> str: + return f"{self.package_address}::{self.pipe_code}" + + +class ConceptNode(BaseModel): + """A concept in the graph with optional refinement link.""" + + model_config = ConfigDict(frozen=True, extra="forbid") + + concept_id: ConceptId + description: str + refines: ConceptId | None = None + structure_fields: list[str] = Field(default_factory=list) + + +class GraphEdge(BaseModel): + """An edge in the know-how graph, discriminated by kind. + + For DATA_FLOW edges: source_pipe_key and target_pipe_key identify connected pipes, + input_param names the target pipe's input parameter being satisfied. + + For REFINEMENT edges: source_concept_id refines target_concept_id. + """ + + model_config = ConfigDict(frozen=True, extra="forbid") + + kind: EdgeKind = Field(strict=False) + # DATA_FLOW fields + source_pipe_key: str | None = None + target_pipe_key: str | None = None + input_param: str | None = None + # REFINEMENT fields + source_concept_id: ConceptId | None = None + target_concept_id: ConceptId | None = None + + +class KnowHowGraph(BaseModel): + """Mutable container for the know-how graph. + + Holds pipe nodes, concept nodes, and edges connecting them. 
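+
+    Pipe nodes are keyed by ``"package_address::pipe_code"`` and concept nodes
+    by ``"package_address::concept_ref"``, matching the ``node_key`` properties
+    of PipeNode and ConceptId.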
+ """ + + model_config = ConfigDict(extra="forbid") + + pipe_nodes: dict[str, PipeNode] = Field(default_factory=dict) + concept_nodes: dict[str, ConceptNode] = Field(default_factory=dict) + data_flow_edges: list[GraphEdge] = Field(default_factory=empty_list_factory_of(GraphEdge)) + refinement_edges: list[GraphEdge] = Field(default_factory=empty_list_factory_of(GraphEdge)) + + def get_pipe_node(self, key: str) -> PipeNode | None: + """Retrieve a pipe node by its node_key.""" + return self.pipe_nodes.get(key) + + def get_concept_node(self, concept_id: ConceptId) -> ConceptNode | None: + """Retrieve a concept node by its ConceptId.""" + return self.concept_nodes.get(concept_id.node_key) + + def get_outgoing_data_flow(self, pipe_key: str) -> list[GraphEdge]: + """Return data flow edges where the given pipe is the source (producer).""" + return [edge for edge in self.data_flow_edges if edge.source_pipe_key == pipe_key] + + def get_incoming_data_flow(self, pipe_key: str) -> list[GraphEdge]: + """Return data flow edges where the given pipe is the target (consumer).""" + return [edge for edge in self.data_flow_edges if edge.target_pipe_key == pipe_key] diff --git a/pipelex/core/packages/graph/query_engine.py b/pipelex/core/packages/graph/query_engine.py new file mode 100644 index 000000000..e7200e93e --- /dev/null +++ b/pipelex/core/packages/graph/query_engine.py @@ -0,0 +1,210 @@ +"""Query engine for the know-how graph. + +Provides type-driven discovery: find pipes by concept compatibility, +check pipe chaining, and search for multi-step pipe chains. +""" + +from collections import deque + +from pipelex.core.packages.graph.models import ( + ConceptId, + KnowHowGraph, + PipeNode, +) + + +def _concepts_are_compatible( + output_id: ConceptId, + input_id: ConceptId, + graph: KnowHowGraph, +) -> bool: + """Check if an output concept is compatible with an input concept. + + Compatible means the output is the exact same concept as the input, + or the output is a refinement (descendant) of the input concept. + + Args: + output_id: The concept produced by a pipe + input_id: The concept expected by another pipe's input + graph: The know-how graph for resolving refinement chains + + Returns: + True if output_id can satisfy input_id + """ + visited: set[str] = set() + current: ConceptId | None = output_id + + while current is not None: + if current.node_key == input_id.node_key: + return True + node_key = current.node_key + if node_key in visited: + break # Cycle detection + visited.add(node_key) + + concept_node = graph.concept_nodes.get(node_key) + if concept_node is None: + break + current = concept_node.refines + + return False + + +class KnowHowQueryEngine: + """Query engine for type-driven discovery on a KnowHowGraph. + + Provides methods to find pipes by concept compatibility, check pipe chaining, + and search for multi-step pipe chains. + """ + + def __init__(self, graph: KnowHowGraph) -> None: + self._graph = graph + + def query_what_can_i_do(self, concept_id: ConceptId) -> list[PipeNode]: + """Find pipes that accept the given concept as input. + + A pipe accepts the concept if any of its input parameters expects + the exact concept or an ancestor (the concept is-a the expected input + via the refinement chain). 
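+
+        For example, if ``Invoice`` refines ``Document`` (hypothetical
+        concepts), an ``Invoice`` in hand also matches pipes whose input
+        expects ``Document``.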
+ + Args: + concept_id: The concept you have available + + Returns: + List of PipeNodes that can consume this concept + """ + result: list[PipeNode] = [] + for pipe_node in self._graph.pipe_nodes.values(): + for input_concept_id in pipe_node.input_concept_ids.values(): + if _concepts_are_compatible(concept_id, input_concept_id, self._graph): + result.append(pipe_node) + break # Don't add the same pipe twice + return result + + def query_what_produces(self, concept_id: ConceptId) -> list[PipeNode]: + """Find pipes that produce the given concept. + + A pipe produces the concept if its output is the exact concept + or a refinement (descendant) of it. + + Args: + concept_id: The concept you need + + Returns: + List of PipeNodes that can produce this concept + """ + result: list[PipeNode] = [] + for pipe_node in self._graph.pipe_nodes.values(): + if _concepts_are_compatible(pipe_node.output_concept_id, concept_id, self._graph): + result.append(pipe_node) + return result + + def check_compatibility(self, source_pipe_key: str, target_pipe_key: str) -> list[str]: + """Check which target pipe input params are compatible with the source pipe's output. + + Args: + source_pipe_key: The node_key of the source (producer) pipe + target_pipe_key: The node_key of the target (consumer) pipe + + Returns: + List of target pipe input parameter names that are compatible. + Empty list means the pipes are incompatible. + """ + source_pipe = self._graph.get_pipe_node(source_pipe_key) + target_pipe = self._graph.get_pipe_node(target_pipe_key) + if source_pipe is None or target_pipe is None: + return [] + + compatible_params: list[str] = [] + for param_name, input_concept_id in target_pipe.input_concept_ids.items(): + if _concepts_are_compatible(source_pipe.output_concept_id, input_concept_id, self._graph): + compatible_params.append(param_name) + return compatible_params + + def resolve_refinement_chain(self, concept_id: ConceptId) -> list[ConceptId]: + """Walk up from concept through refines links. + + Args: + concept_id: The starting concept + + Returns: + List of [concept, parent, grandparent, ...] following the refinement chain. + Cycle-safe via visited set. + """ + chain: list[ConceptId] = [] + visited: set[str] = set() + current: ConceptId | None = concept_id + + while current is not None: + node_key = current.node_key + if node_key in visited: + break # Cycle detection + visited.add(node_key) + chain.append(current) + + concept_node = self._graph.concept_nodes.get(node_key) + if concept_node is None: + break + current = concept_node.refines + + return chain + + def query_i_have_i_need( + self, + input_concept_id: ConceptId, + output_concept_id: ConceptId, + max_depth: int = 3, + ) -> list[list[str]]: + """Find multi-step pipe chains from input to output concept via BFS. + + Args: + input_concept_id: The concept you have + output_concept_id: The concept you need + max_depth: Maximum number of pipes in a chain + + Returns: + List of pipe chains (each chain is a list of pipe node_keys), + sorted shortest-first. Empty if no path found. 
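+
+        Example:
+            A minimal sketch; ``text_id`` and ``summary_id`` are assumed
+            ConceptIds and the node keys are hypothetical::
+
+                chains = engine.query_i_have_i_need(text_id, summary_id, max_depth=2)
+                # e.g. [["github.com/acme/legal::extract_clause",
+                #        "github.com/acme/legal::summarize_clause"]]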
+        """
+        # Find starter pipes: those that accept input_concept_id
+        starter_pipes = self.query_what_can_i_do(input_concept_id)
+        if not starter_pipes:
+            return []
+
+        results: list[list[str]] = []
+        # BFS queue: (current_chain, set_of_visited_pipe_keys)
+        queue: deque[tuple[list[str], set[str]]] = deque()
+
+        for pipe_node in starter_pipes:
+            queue.append(([pipe_node.node_key], {pipe_node.node_key}))
+
+        while queue:
+            chain, visited = queue.popleft()
+            if len(chain) > max_depth:
+                continue
+
+            # Check if last pipe in chain produces the desired output
+            last_pipe_key = chain[-1]
+            last_pipe = self._graph.get_pipe_node(last_pipe_key)
+            if last_pipe is None:
+                continue
+
+            if _concepts_are_compatible(last_pipe.output_concept_id, output_concept_id, self._graph):
+                results.append(chain)
+                continue  # Found a complete chain, don't extend further
+
+            # Don't extend if already at max depth
+            if len(chain) >= max_depth:
+                continue
+
+            # Find next pipes that can consume this pipe's output
+            next_pipes = self.query_what_can_i_do(last_pipe.output_concept_id)
+            for next_pipe in next_pipes:
+                if next_pipe.node_key not in visited:
+                    new_chain = [*chain, next_pipe.node_key]
+                    new_visited = visited | {next_pipe.node_key}
+                    queue.append((new_chain, new_visited))
+
+        # Sort shortest-first
+        results.sort(key=len)
+        return results
diff --git a/pipelex/core/packages/index/__init__.py b/pipelex/core/packages/index/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/pipelex/core/packages/index/index_builder.py b/pipelex/core/packages/index/index_builder.py
new file mode 100644
index 000000000..8b114bb3f
--- /dev/null
+++ b/pipelex/core/packages/index/index_builder.py
@@ -0,0 +1,258 @@
+"""Build package index entries by scanning METHODS.toml and .mthds files.
+
+Operates at blueprint level (string-based signatures) — no runtime class loading,
+no side effects. Pure file scanning.
+"""
+
+from pathlib import Path
+
+from pipelex import log
+from pipelex.core.concepts.concept_blueprint import ConceptBlueprint
+from pipelex.core.interpreter.interpreter import PipelexInterpreter
+from pipelex.core.packages.dependency_resolver import collect_mthds_files, determine_exported_pipes
+from pipelex.core.packages.discovery import MANIFEST_FILENAME
+from pipelex.core.packages.exceptions import IndexBuildError
+from pipelex.core.packages.index.models import (
+    ConceptEntry,
+    DomainEntry,
+    PackageIndex,
+    PackageIndexEntry,
+    PipeSignature,
+)
+from pipelex.core.packages.manifest import MthdsPackageManifest
+from pipelex.core.packages.manifest_parser import parse_methods_toml
+from pipelex.core.packages.package_cache import get_default_cache_root
+
+
+def build_index_entry_from_package(package_root: Path) -> PackageIndexEntry:
+    """Build a PackageIndexEntry by parsing METHODS.toml and .mthds files.
+ + Args: + package_root: Root directory of the package + + Returns: + A PackageIndexEntry with all metadata, domains, concepts, and pipe signatures + + Raises: + IndexBuildError: If the package cannot be indexed + """ + manifest = _load_manifest(package_root) + if manifest is None: + msg = f"No METHODS.toml found in {package_root}" + raise IndexBuildError(msg) + + mthds_files = collect_mthds_files(package_root) + if not mthds_files: + msg = f"No .mthds files found in {package_root}" + raise IndexBuildError(msg) + + exported_pipe_codes = determine_exported_pipes(manifest) + domains: dict[str, DomainEntry] = {} + concepts: list[ConceptEntry] = [] + pipes: list[PipeSignature] = [] + errors: list[str] = [] + + for mthds_file in mthds_files: + try: + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=mthds_file) + except Exception as exc: + errors.append(f"{mthds_file}: {exc}") + continue + + domain_code = blueprint.domain + if domain_code not in domains: + domains[domain_code] = DomainEntry( + domain_code=domain_code, + description=blueprint.description, + ) + + if blueprint.concept: + for concept_code, concept_blueprint in blueprint.concept.items(): + concepts.append(_build_concept_entry(concept_code, domain_code, concept_blueprint)) + + if blueprint.pipe: + for pipe_code, pipe_blueprint in blueprint.pipe.items(): + is_exported = _is_pipe_exported(pipe_code, exported_pipe_codes, blueprint.main_pipe) + pipes.append( + PipeSignature( + pipe_code=pipe_code, + pipe_type=pipe_blueprint.type, + domain_code=domain_code, + description=pipe_blueprint.description, + input_specs=dict(pipe_blueprint.inputs) if pipe_blueprint.inputs else {}, + output_spec=pipe_blueprint.output, + is_exported=is_exported, + ) + ) + + if errors: + log.warning(f"Errors while indexing {package_root}: {errors}") + + dependency_addresses = [dep.address for dep in manifest.dependencies] + dependency_aliases = {dep.alias: dep.address for dep in manifest.dependencies} + + return PackageIndexEntry( + address=manifest.address, + display_name=manifest.display_name, + version=manifest.version, + description=manifest.description, + authors=list(manifest.authors), + license=manifest.license, + domains=sorted(domains.values(), key=lambda dom: dom.domain_code), + concepts=concepts, + pipes=pipes, + dependencies=dependency_addresses, + dependency_aliases=dependency_aliases, + ) + + +def build_index_from_cache(cache_root: Path | None = None) -> PackageIndex: + """Build a PackageIndex by scanning all packages in the cache. + + The cache layout is ``cache_root/{address}/{version}/`` where address + can have multiple path segments (e.g. ``github.com/org/repo``). We find + package directories by scanning for ``METHODS.toml`` files recursively. + + Args: + cache_root: Override for cache root directory (default: ~/.mthds/packages) + + Returns: + A PackageIndex with entries for all cached packages + """ + root = cache_root or get_default_cache_root() + index = PackageIndex() + + if not root.is_dir(): + return index + + for manifest_path in sorted(root.rglob(MANIFEST_FILENAME)): + package_dir = manifest_path.parent + try: + entry = build_index_entry_from_package(package_dir) + index.add_entry(entry) + except IndexBuildError as exc: + log.warning(f"Skipping cached package {package_dir}: {exc}") + + return index + + +def build_index_from_project(project_root: Path) -> PackageIndex: + """Build a PackageIndex from the current project and its resolved dependencies. 
+
+    Indexes the project itself (if it has METHODS.toml) plus any dependency
+    packages found in the cache.
+
+    Args:
+        project_root: Root directory of the project
+
+    Returns:
+        A PackageIndex with the project and its dependencies
+    """
+    index = PackageIndex()
+
+    manifest = _load_manifest(project_root)
+    if manifest is None:
+        return index
+
+    mthds_files = collect_mthds_files(project_root)
+    if mthds_files:
+        try:
+            entry = build_index_entry_from_package(project_root)
+            index.add_entry(entry)
+        except IndexBuildError as exc:
+            log.warning(f"Could not index project: {exc}")
+
+    # Index cached dependencies
+    for dep in manifest.dependencies:
+        if dep.path:
+            # Local path dependency — index from the path
+            dep_path = (project_root / dep.path).resolve()
+            if dep_path.is_dir():
+                try:
+                    entry = build_index_entry_from_package(dep_path)
+                    index.add_entry(entry)
+                except IndexBuildError as exc:
+                    log.warning(f"Could not index local dependency {dep.alias}: {exc}")
+        else:
+            # Remote dependency — look in cache
+            _index_cached_dependency(index, dep.address)
+
+    return index
+
+
+def _load_manifest(package_root: Path) -> MthdsPackageManifest | None:
+    """Load METHODS.toml from a package root, or return None."""
+    manifest_path = package_root / MANIFEST_FILENAME
+    if not manifest_path.is_file():
+        return None
+    content = manifest_path.read_text(encoding="utf-8")
+    return parse_methods_toml(content)
+
+
+def _build_concept_entry(
+    concept_code: str,
+    domain_code: str,
+    concept_blueprint: ConceptBlueprint | str,
+) -> ConceptEntry:
+    """Build a ConceptEntry from a concept blueprint."""
+    if isinstance(concept_blueprint, str):
+        return ConceptEntry(
+            concept_code=concept_code,
+            domain_code=domain_code,
+            concept_ref=f"{domain_code}.{concept_code}",
+            description=concept_blueprint,
+        )
+
+    # Only the field names are indexed; structure values (blueprint objects or
+    # plain description strings) carry detail the index does not need
+    structure_fields: list[str] = []
+    if isinstance(concept_blueprint.structure, dict):
+        structure_fields = list(concept_blueprint.structure.keys())
+
+    return ConceptEntry(
+        concept_code=concept_code,
+        domain_code=domain_code,
+        concept_ref=f"{domain_code}.{concept_code}",
+        description=concept_blueprint.description,
+        refines=concept_blueprint.refines,
+        structure_fields=structure_fields,
+    )
+
+
+def _is_pipe_exported(
+    pipe_code: str,
+    exported_pipe_codes: set[str] | None,
+    main_pipe: str | None,
+) -> bool:
+    """Determine if a pipe is exported.
+
+    A pipe is exported if:
+    - exported_pipe_codes is None (no manifest = all public)
+    - pipe_code is in the exported set
+    - pipe_code is the main_pipe (auto-exported)
+    """
+    if exported_pipe_codes is None:
+        return True
+    return pipe_code in exported_pipe_codes or pipe_code == main_pipe
+
+
+def _index_cached_dependency(index: PackageIndex, address: str) -> None:
+    """Try to index a remote dependency from the cache."""
+    cache_root = get_default_cache_root()
+    address_dir = cache_root / address
+    if not address_dir.is_dir():
+        return
+
+    # Best-effort: try version directories in reverse lexicographic order,
+    # which approximates newest-first (not a strict semver sort)
+    version_dirs = sorted(address_dir.iterdir(), reverse=True)
+    for version_dir in version_dirs:
+        if version_dir.is_dir():
+            try:
+                entry = build_index_entry_from_package(version_dir)
+                index.add_entry(entry)
+                return
+            except IndexBuildError:
+                continue
diff --git a/pipelex/core/packages/index/models.py b/pipelex/core/packages/index/models.py
new file mode 100644
index 000000000..b1402e5f8
--- /dev/null
+++ b/pipelex/core/packages/index/models.py
@@ -0,0 +1,100 @@
+from pydantic import BaseModel, ConfigDict, Field
+
+from pipelex.tools.typing.pydantic_utils import empty_list_factory_of
+
+
+class PipeSignature(BaseModel):
+    """Indexed representation of a pipe's typed signature.
+
+    Stores pipe metadata and input/output concept specs as strings
+    (blueprint-level, no runtime class loading).
+    """
+
+    model_config = ConfigDict(frozen=True, extra="forbid")
+
+    pipe_code: str
+    pipe_type: str
+    domain_code: str
+    description: str
+    input_specs: dict[str, str] = Field(default_factory=dict)
+    output_spec: str
+    is_exported: bool
+
+
+class ConceptEntry(BaseModel):
+    """Indexed representation of a concept definition."""
+
+    model_config = ConfigDict(frozen=True, extra="forbid")
+
+    concept_code: str
+    domain_code: str
+    concept_ref: str
+    description: str
+    refines: str | None = None
+    structure_fields: list[str] = Field(default_factory=list)
+
+
+class DomainEntry(BaseModel):
+    """Indexed representation of a domain."""
+
+    model_config = ConfigDict(frozen=True, extra="forbid")
+
+    domain_code: str
+    description: str | None = None
+
+
+class PackageIndexEntry(BaseModel):
+    """Indexed view of a single package: metadata + domains + concepts + pipe signatures."""
+
+    model_config = ConfigDict(frozen=True, extra="forbid")
+
+    address: str
+    display_name: str | None = None
+    version: str
+    description: str
+    authors: list[str] = Field(default_factory=list)
+    license: str | None = None
+    domains: list[DomainEntry] = Field(default_factory=empty_list_factory_of(DomainEntry))
+    concepts: list[ConceptEntry] = Field(default_factory=empty_list_factory_of(ConceptEntry))
+    pipes: list[PipeSignature] = Field(default_factory=empty_list_factory_of(PipeSignature))
+    dependencies: list[str] = Field(default_factory=list)
+    dependency_aliases: dict[str, str] = Field(default_factory=dict)
+
+
+class PackageIndex(BaseModel):
+    """Collection of indexed packages, keyed by address."""
+
+    model_config = ConfigDict(extra="forbid")
+
+    entries: dict[str, PackageIndexEntry] = Field(default_factory=dict)
+
+    def add_entry(self, entry: PackageIndexEntry) -> None:
+        """Add or replace a package index entry."""
+        self.entries[entry.address] = entry
+
+    def get_entry(self, address: str) -> PackageIndexEntry | None:
+        """Retrieve an entry by address, or None if not found."""
+        return self.entries.get(address)
+
+    def remove_entry(self, address: str) -> bool:
+        """Remove an entry by address.
Returns True if it existed.""" + if address in self.entries: + del self.entries[address] + return True + return False + + def all_concepts(self) -> list[tuple[str, ConceptEntry]]: + """Return all concepts across all packages as (address, ConceptEntry) pairs.""" + result: list[tuple[str, ConceptEntry]] = [] + for address, entry in self.entries.items(): + for concept in entry.concepts: + result.append((address, concept)) + return result + + def all_pipes(self) -> list[tuple[str, PipeSignature]]: + """Return all pipes across all packages as (address, PipeSignature) pairs.""" + result: list[tuple[str, PipeSignature]] = [] + for address, entry in self.entries.items(): + for pipe in entry.pipes: + result.append((address, pipe)) + return result diff --git a/pipelex/core/packages/lock_file.py b/pipelex/core/packages/lock_file.py new file mode 100644 index 000000000..46855fb74 --- /dev/null +++ b/pipelex/core/packages/lock_file.py @@ -0,0 +1,286 @@ +"""Lock file model, hash computation, TOML I/O, generation, and verification. + +The lock file (``methods.lock``) records exact resolved versions and SHA-256 +integrity hashes for remote dependencies, enabling reproducible builds. +""" + +import hashlib +import re +from pathlib import Path +from typing import Any, cast + +import tomlkit +from pydantic import BaseModel, ConfigDict, Field, field_validator + +from pipelex.core.packages.exceptions import IntegrityError, LockFileError +from pipelex.core.packages.manifest import MthdsPackageManifest, is_valid_semver +from pipelex.core.packages.package_cache import get_cached_package_path +from pipelex.tools.misc.toml_utils import TomlError, load_toml_from_content + +LOCK_FILENAME = "methods.lock" +HASH_PREFIX = "sha256:" + +_HASH_PATTERN = re.compile(r"^sha256:[0-9a-f]{64}$") + + +# --------------------------------------------------------------------------- +# Models +# --------------------------------------------------------------------------- + + +class LockedPackage(BaseModel): + """A single locked dependency entry.""" + + model_config = ConfigDict(frozen=True, extra="forbid") + + version: str + hash: str + source: str + + @field_validator("version") + @classmethod + def validate_version(cls, version: str) -> str: + if not is_valid_semver(version): + msg = f"Invalid version '{version}' in lock file. Must be valid semver." + raise ValueError(msg) + return version + + @field_validator("hash") + @classmethod + def validate_hash(cls, hash_value: str) -> str: + if not _HASH_PATTERN.match(hash_value): + msg = f"Invalid hash '{hash_value}'. Must be '{HASH_PREFIX}' followed by exactly 64 hex characters." + raise ValueError(msg) + return hash_value + + @field_validator("source") + @classmethod + def validate_source(cls, source: str) -> str: + if not source.startswith("https://"): + msg = f"Invalid source '{source}'. Must start with 'https://'." + raise ValueError(msg) + return source + + +class LockFile(BaseModel): + """The methods.lock file model.""" + + model_config = ConfigDict(frozen=True, extra="forbid") + + packages: dict[str, LockedPackage] = Field(default_factory=dict) + + +# --------------------------------------------------------------------------- +# Hash computation +# --------------------------------------------------------------------------- + + +def compute_directory_hash(directory: Path) -> str: + """Compute a deterministic SHA-256 hash of a directory's contents. 
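+
+    Example (illustrative; an empty directory yields the well-known SHA-256
+    digest of empty input):
+
+        >>> compute_directory_hash(Path("path/to/empty_dir"))  # doctest: +SKIP
+        'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'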
+ + Collects all regular files recursively, skips any path containing ``.git`` + in parts, sorts by POSIX-normalized relative path, and feeds each file's + relative path string (UTF-8) + raw bytes into a single hasher. + + Args: + directory: The directory to hash. + + Returns: + A string in the form ``sha256:<64 hex chars>``. + + Raises: + LockFileError: If the directory does not exist. + """ + if not directory.is_dir(): + msg = f"Directory '{directory}' does not exist or is not a directory" + raise LockFileError(msg) + + hasher = hashlib.sha256() + + # Collect all regular files, skip .git + file_paths: list[Path] = [] + for file_path in directory.rglob("*"): + if not file_path.is_file(): + continue + if ".git" in file_path.relative_to(directory).parts: + continue + file_paths.append(file_path) + + # Sort by POSIX-normalized relative path for cross-platform determinism + file_paths.sort(key=lambda path: path.relative_to(directory).as_posix()) + + for file_path in file_paths: + relative_posix = file_path.relative_to(directory).as_posix() + hasher.update(relative_posix.encode("utf-8")) + hasher.update(file_path.read_bytes()) + + return f"{HASH_PREFIX}{hasher.hexdigest()}" + + +# --------------------------------------------------------------------------- +# TOML parse / serialize +# --------------------------------------------------------------------------- + + +def parse_lock_file(content: str) -> LockFile: + """Parse a lock file TOML string into a ``LockFile`` model. + + Args: + content: The raw TOML string. + + Returns: + A validated ``LockFile``. + + Raises: + LockFileError: If parsing or validation fails. + """ + if not content.strip(): + return LockFile() + + try: + raw = load_toml_from_content(content) + except TomlError as exc: + msg = f"Invalid TOML syntax in lock file: {exc.message}" + raise LockFileError(msg) from exc + + packages: dict[str, LockedPackage] = {} + for address, entry in raw.items(): + if not isinstance(entry, dict): + msg = f"Lock file entry for '{address}' must be a table, got {type(entry).__name__}" + raise LockFileError(msg) + entry_dict = cast("dict[str, Any]", entry) + try: + packages[str(address)] = LockedPackage(**entry_dict) + except Exception as exc: + msg = f"Invalid lock file entry for '{address}': {exc}" + raise LockFileError(msg) from exc + + return LockFile(packages=packages) + + +def serialize_lock_file(lock_file: LockFile) -> str: + """Serialize a ``LockFile`` to a TOML string. + + Entries are sorted by address for deterministic output (clean VCS diffs). + + Args: + lock_file: The lock file model to serialize. + + Returns: + A TOML-formatted string. + """ + doc = tomlkit.document() + + for address in sorted(lock_file.packages): + locked = lock_file.packages[address] + table = tomlkit.table() + table.add("version", locked.version) + table.add("hash", locked.hash) + table.add("source", locked.source) + doc.add(address, table) + + return tomlkit.dumps(doc) # type: ignore[arg-type] + + +# --------------------------------------------------------------------------- +# Lock file generation +# --------------------------------------------------------------------------- + + +def generate_lock_file( + manifest: MthdsPackageManifest, + resolved_deps: list[Any], +) -> LockFile: + """Generate a lock file from resolved dependencies. + + Locks all remote dependencies (including transitive) by using + ``resolved.address`` directly. Local path overrides from the root + manifest are excluded. + + Args: + manifest: The consuming package's manifest. 
+        resolved_deps: List of ``ResolvedDependency`` from the resolver.
+
+    Returns:
+        A ``LockFile`` with entries for remote dependencies only.
+
+    Raises:
+        LockFileError: If a remote dependency has no manifest.
+    """
+    packages: dict[str, LockedPackage] = {}
+
+    # Build set of local-override addresses from root manifest
+    local_addresses = {dep.address for dep in manifest.dependencies if dep.path is not None}
+
+    for resolved in resolved_deps:
+        # Skip local path overrides
+        if resolved.address in local_addresses:
+            continue
+
+        # Remote dep must have a manifest
+        if resolved.manifest is None:
+            msg = f"Remote dependency '{resolved.alias}' ({resolved.address}) has no manifest; cannot generate a lock entry"
+            raise LockFileError(msg)
+
+        address = resolved.address
+        version = resolved.manifest.version
+        hash_value = compute_directory_hash(resolved.package_root)
+        source = f"https://{address}"
+
+        packages[address] = LockedPackage(
+            version=version,
+            hash=hash_value,
+            source=source,
+        )
+
+    return LockFile(packages=packages)
+
+
+# ---------------------------------------------------------------------------
+# Verification
+# ---------------------------------------------------------------------------
+
+
+def verify_locked_package(
+    locked: LockedPackage,
+    address: str,
+    cache_root: Path | None = None,
+) -> None:
+    """Verify a single locked package against its cached copy.
+
+    Args:
+        locked: The locked package entry.
+        address: The package address.
+        cache_root: Override for the cache root directory.
+
+    Raises:
+        IntegrityError: If the cached package is missing or its hash does not match.
+    """
+    # Locate the cached directory for this address and version
+    cached_path = get_cached_package_path(address, locked.version, cache_root)
+
+    if not cached_path.is_dir():
+        msg = f"Cached package '{address}@{locked.version}' not found at '{cached_path}'"
+        raise IntegrityError(msg)
+
+    actual_hash = compute_directory_hash(cached_path)
+    if actual_hash != locked.hash:
+        msg = f"Integrity check failed for '{address}@{locked.version}': expected {locked.hash}, got {actual_hash}"
+        raise IntegrityError(msg)
+
+
+def verify_lock_file(
+    lock_file: LockFile,
+    cache_root: Path | None = None,
+) -> None:
+    """Verify all entries in a lock file against the cache.
+
+    Args:
+        lock_file: The lock file to verify.
+        cache_root: Override for the cache root directory.
+
+    Raises:
+        IntegrityError: If any cached package is missing or has a hash mismatch.
+    """
+    for address, locked in lock_file.packages.items():
+        verify_locked_package(locked, address, cache_root)
diff --git a/pipelex/core/packages/manifest.py b/pipelex/core/packages/manifest.py
new file mode 100644
index 000000000..90444f034
--- /dev/null
+++ b/pipelex/core/packages/manifest.py
@@ -0,0 +1,230 @@
+import re
+import unicodedata
+
+from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
+
+from pipelex.core.domains.validation import is_domain_code_valid
+from pipelex.core.pipes.validation import is_pipe_code_valid
+from pipelex.tools.misc.string_utils import is_snake_case
+from pipelex.tools.typing.pydantic_utils import empty_list_factory_of
+from pipelex.types import Self
+
+# Semver regex: MAJOR.MINOR.PATCH with optional pre-release and build metadata
+SEMVER_PATTERN = re.compile(
+    r"^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)"
+    r"(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?"
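+    # optional build metadata suffix (e.g. '1.0.0+build.5'), matched by the group below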
+ r"(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$" +) + +# Version constraint pattern: supports standard range syntax used by Poetry/uv. +# A single constraint is: optional operator + semver (with optional wildcard minor/patch). +# Multiple constraints can be comma-separated (e.g., ">=1.0.0, <2.0.0"). +# Supported forms: "1.0.0", "^1.0.0", "~1.0.0", ">=1.0.0", "<=1.0.0", ">1.0.0", "<1.0.0", +# "==1.0.0", "!=1.0.0", ">=1.0.0, <2.0.0", "*", "1.*", "1.0.*" +_SINGLE_CONSTRAINT = ( + r"(?:" + r"\*" # wildcard: * + r"|(?:(?:\^|~|>=?|<=?|==|!=)?(?:0|[1-9]\d*)(?:\.(?:0|[1-9]\d*|\*))?(?:\.(?:0|[1-9]\d*|\*))?)" # [op]MAJOR[.MINOR[.PATCH]] + r"(?:-(?:(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?)" # optional prerelease +) +VERSION_CONSTRAINT_PATTERN = re.compile(rf"^{_SINGLE_CONSTRAINT}(?:\s*,\s*{_SINGLE_CONSTRAINT})*$") + +# Address pattern: must contain at least one dot before a slash (hostname pattern) +# e.g. "github.com/org/repo", "example.io/pkg" +ADDRESS_PATTERN = re.compile(r"^[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+/[a-zA-Z0-9._/-]+$") + +RESERVED_DOMAINS: frozenset[str] = frozenset({"native", "mthds", "pipelex"}) + +MTHDS_STANDARD_VERSION: str = "1.0.0" + + +def is_reserved_domain_path(domain_path: str) -> bool: + """Check if a domain path starts with a reserved domain segment.""" + first_segment = domain_path.split(".", maxsplit=1)[0] + return first_segment in RESERVED_DOMAINS + + +def is_valid_semver(version: str) -> bool: + """Check if a version string is valid semver.""" + return SEMVER_PATTERN.match(version) is not None + + +def is_valid_version_constraint(constraint: str) -> bool: + """Check if a version constraint string is valid. + + Supports standard range syntax used by Poetry/uv: + - Exact: "1.0.0" + - Caret: "^1.0.0" (compatible release) + - Tilde: "~1.0.0" (approximately compatible) + - Comparison: ">=1.0.0", "<=1.0.0", ">1.0.0", "<1.0.0", "==1.0.0", "!=1.0.0" + - Compound: ">=1.0.0, <2.0.0" + - Wildcard: "*", "1.*", "1.0.*" + """ + return VERSION_CONSTRAINT_PATTERN.match(constraint.strip()) is not None + + +def is_valid_address(address: str) -> bool: + """Check if an address contains at least one dot before a slash (hostname pattern).""" + return ADDRESS_PATTERN.match(address) is not None + + +class PackageDependency(BaseModel): + """A dependency on another MTHDS package.""" + + model_config = ConfigDict(extra="forbid") + + address: str + version: str + alias: str + path: str | None = None + + @field_validator("address") + @classmethod + def validate_address(cls, address: str) -> str: + if not is_valid_address(address): + msg = f"Invalid package address '{address}'. Address must follow hostname/path pattern (e.g. 'github.com/org/repo')." + raise ValueError(msg) + return address + + @field_validator("version") + @classmethod + def validate_version(cls, version: str) -> str: + if not is_valid_version_constraint(version): + msg = f"Invalid version constraint '{version}'. Must be a valid version range (e.g. '1.0.0', '^1.0.0', '>=1.0.0, <2.0.0')." + raise ValueError(msg) + return version + + @field_validator("alias") + @classmethod + def validate_alias(cls, alias: str) -> str: + if not is_snake_case(alias): + msg = f"Invalid dependency alias '{alias}'. Must be snake_case." 
+ raise ValueError(msg) + return alias + + +class DomainExports(BaseModel): + """Exports for a single domain within a package.""" + + model_config = ConfigDict(extra="forbid") + + domain_path: str + pipes: list[str] = Field(default_factory=list) + + @field_validator("domain_path") + @classmethod + def validate_domain_path(cls, domain_path: str) -> str: + if not is_domain_code_valid(domain_path): + msg = f"Invalid domain path '{domain_path}' in [exports]. Domain paths must be dot-separated snake_case segments." + raise ValueError(msg) + if is_reserved_domain_path(domain_path): + first_segment = domain_path.split(".", maxsplit=1)[0] + msg = ( + f"Domain path '{domain_path}' uses reserved domain '{first_segment}'. " + f"Reserved domains ({', '.join(sorted(RESERVED_DOMAINS))}) cannot be used in package exports." + ) + raise ValueError(msg) + return domain_path + + @field_validator("pipes") + @classmethod + def validate_pipes(cls, pipes: list[str]) -> list[str]: + for pipe_name in pipes: + if not is_pipe_code_valid(pipe_name): + msg = f"Invalid pipe name '{pipe_name}' in [exports]. Pipe names must be in snake_case." + raise ValueError(msg) + return pipes + + +class MthdsPackageManifest(BaseModel): + """The METHODS.toml package manifest model.""" + + model_config = ConfigDict(extra="forbid") + + address: str + display_name: str | None = None + version: str + description: str + authors: list[str] = Field(default_factory=list) + license: str | None = None + mthds_version: str | None = None + + dependencies: list[PackageDependency] = Field(default_factory=empty_list_factory_of(PackageDependency)) + exports: list[DomainExports] = Field(default_factory=empty_list_factory_of(DomainExports)) + + @field_validator("address") + @classmethod + def validate_address(cls, address: str) -> str: + if not is_valid_address(address): + msg = f"Invalid package address '{address}'. Address must follow hostname/path pattern (e.g. 'github.com/org/repo')." + raise ValueError(msg) + return address + + @field_validator("version") + @classmethod + def validate_version(cls, version: str) -> str: + if not is_valid_semver(version): + msg = f"Invalid version '{version}'. Must be valid semver (e.g. '1.0.0', '2.1.3-beta.1')." + raise ValueError(msg) + return version + + @field_validator("display_name") + @classmethod + def validate_display_name(cls, display_name: str | None) -> str | None: + if display_name is None: + return None + stripped = display_name.strip() + if not stripped: + msg = "Display name must not be empty or whitespace when provided." + raise ValueError(msg) + if len(stripped) > 128: + msg = f"Display name must not exceed 128 characters (got {len(stripped)})." + raise ValueError(msg) + if any(unicodedata.category(char) == "Cc" for char in stripped): + msg = "Display name must not contain control characters." + raise ValueError(msg) + return stripped + + @field_validator("description") + @classmethod + def validate_description(cls, description: str) -> str: + if not description.strip(): + msg = "Package description must not be empty." + raise ValueError(msg) + return description + + @field_validator("authors") + @classmethod + def validate_authors(cls, authors: list[str]) -> list[str]: + for index_author, author in enumerate(authors): + if not author.strip(): + msg = f"Author at index {index_author} must not be empty or whitespace." 
+ raise ValueError(msg) + return authors + + @field_validator("license") + @classmethod + def validate_license(cls, license_value: str | None) -> str | None: + if license_value is not None and not license_value.strip(): + msg = "License must not be empty or whitespace when provided." + raise ValueError(msg) + return license_value + + @field_validator("mthds_version") + @classmethod + def validate_mthds_version(cls, mthds_version: str | None) -> str | None: + if mthds_version is not None and not is_valid_version_constraint(mthds_version): + msg = f"Invalid mthds_version constraint '{mthds_version}'. Must be a valid version constraint (e.g. '1.0.0', '^1.0.0', '>=1.0.0')." + raise ValueError(msg) + return mthds_version + + @model_validator(mode="after") + def validate_unique_dependency_aliases(self) -> Self: + """Ensure all dependency aliases are unique.""" + seen_aliases: set[str] = set() + for dep in self.dependencies: + if dep.alias in seen_aliases: + msg = f"Duplicate dependency alias '{dep.alias}'. Each dependency must have a unique alias." + raise ValueError(msg) + seen_aliases.add(dep.alias) + return self diff --git a/pipelex/core/packages/manifest_parser.py b/pipelex/core/packages/manifest_parser.py new file mode 100644 index 000000000..032202cd2 --- /dev/null +++ b/pipelex/core/packages/manifest_parser.py @@ -0,0 +1,207 @@ +from typing import Any, cast + +import tomlkit +from pydantic import ValidationError + +from pipelex.core.packages.exceptions import ManifestParseError, ManifestValidationError +from pipelex.core.packages.manifest import DomainExports, MthdsPackageManifest, PackageDependency +from pipelex.tools.misc.toml_utils import TomlError, load_toml_from_content + + +def _walk_exports_table(table: dict[str, Any], prefix: str = "") -> list[DomainExports]: + """Recursively walk nested exports sub-tables to reconstruct dotted domain paths. + + Given a TOML structure like: + [exports.legal.contracts] + pipes = ["extract_clause"] + + This produces DomainExports(domain_path="legal.contracts", pipes=["extract_clause"]). + + Args: + table: The current dict-level of the exports table + prefix: The dotted path prefix accumulated so far + + Returns: + List of DomainExports built from nested sub-tables + """ + result: list[DomainExports] = [] + + for key, value in table.items(): + current_path = f"{prefix}.{key}" if prefix else str(key) + + if isinstance(value, dict): + value_dict = cast("dict[str, Any]", value) + # Check if this level has a "pipes" key (leaf domain) + if "pipes" in value_dict: + pipes_value = value_dict["pipes"] + if not isinstance(pipes_value, list): + msg = f"'pipes' in domain '{current_path}' must be a list, got {type(pipes_value).__name__}" + raise ManifestValidationError(msg) + pipes_list = cast("list[str]", pipes_value) + result.append(DomainExports(domain_path=current_path, pipes=pipes_list)) + + # Also recurse into remaining sub-tables (a domain can have both pipes and sub-domains) + for sub_key, sub_value in value_dict.items(): + if sub_key != "pipes" and isinstance(sub_value, dict): + sub_dict = cast("dict[str, Any]", {sub_key: sub_value}) + result.extend(_walk_exports_table(sub_dict, prefix=current_path)) + else: + # No pipes at this level, just recurse deeper + result.extend(_walk_exports_table(value_dict, prefix=current_path)) + + return result + + +def parse_methods_toml(content: str) -> MthdsPackageManifest: + """Parse METHODS.toml content into an MthdsPackageManifest model. 
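+
+    A minimal input looks like this (illustrative; the address and names are
+    made up)::
+
+        [package]
+        address = "github.com/acme/legal_methods"
+        version = "1.0.0"
+        description = "Contract analysis pipes"
+
+        [dependencies]
+        scoring = { address = "github.com/acme/scoring", version = "^1.0.0" }
+
+        [exports.legal.contracts]
+        pipes = ["extract_clause"]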
+ + Args: + content: The raw TOML string + + Returns: + A validated MthdsPackageManifest + + Raises: + ManifestParseError: If the TOML syntax is invalid + ManifestValidationError: If the parsed data fails model validation + """ + try: + raw = load_toml_from_content(content) + except TomlError as exc: + msg = f"Invalid TOML syntax in METHODS.toml: {exc.message}" + raise ManifestParseError(msg) from exc + + # Extract [package] section + package_section = raw.get("package") + if not isinstance(package_section, dict): + msg = "METHODS.toml must contain a [package] section" + raise ManifestValidationError(msg) + pkg = cast("dict[str, Any]", package_section) + + # Extract [dependencies] section + deps_section = raw.get("dependencies", {}) + dependencies: list[PackageDependency] = [] + if isinstance(deps_section, dict): + deps_dict = cast("dict[str, Any]", deps_section) + for alias, dep_data in deps_dict.items(): + if isinstance(dep_data, dict): + dep_data_dict = cast("dict[str, Any]", dep_data) + dep_data_dict["alias"] = str(alias) + try: + dependencies.append(PackageDependency(**dep_data_dict)) + except ValidationError as exc: + msg = f"Invalid dependency '{alias}' in METHODS.toml: {exc}" + raise ManifestValidationError(msg) from exc + else: + msg = ( + f"Invalid dependency '{alias}' in METHODS.toml: expected a table with 'address' and 'version' keys, got {type(dep_data).__name__}" + ) + raise ManifestValidationError(msg) + + # Extract [exports] section with recursive walk + exports_section = raw.get("exports", {}) + exports: list[DomainExports] = [] + if isinstance(exports_section, dict): + exports_dict = cast("dict[str, Any]", exports_section) + try: + exports = _walk_exports_table(exports_dict) + except ValidationError as exc: + msg = f"Invalid exports in METHODS.toml: {exc}" + raise ManifestValidationError(msg) from exc + + # Reject unknown keys in [package] section + known_package_keys = {"address", "display_name", "version", "description", "authors", "license", "mthds_version"} + unknown_keys = set(pkg.keys()) - known_package_keys + if unknown_keys: + msg = f"Unknown keys in [package] section: {', '.join(sorted(unknown_keys))}" + raise ManifestValidationError(msg) + + # Build the manifest + address: str = str(pkg.get("address", "")) + version: str = str(pkg.get("version", "")) + description: str = str(pkg.get("description", "")) + authors_val = pkg.get("authors", []) + authors: list[str] = cast("list[str]", authors_val) if isinstance(authors_val, list) else [] + license_val = pkg.get("license") + license_str: str | None = str(license_val) if license_val is not None else None + mthds_version_val = pkg.get("mthds_version") + mthds_version: str | None = str(mthds_version_val) if mthds_version_val is not None else None + display_name_val = pkg.get("display_name") + display_name: str | None = str(display_name_val) if display_name_val is not None else None + + try: + manifest = MthdsPackageManifest( + address=address, + display_name=display_name, + version=version, + description=description, + authors=authors, + license=license_str, + mthds_version=mthds_version, + dependencies=dependencies, + exports=exports, + ) + except ValidationError as exc: + msg = f"METHODS.toml validation failed: {exc}" + raise ManifestValidationError(msg) from exc + + return manifest + + +def serialize_manifest_to_toml(manifest: MthdsPackageManifest) -> str: + """Serialize an MthdsPackageManifest to a human-readable TOML string. 
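+
+    Dotted domain paths in exports are written back as nested tables, so an
+    export for "legal.contracts" is emitted along these lines (illustrative)::
+
+        [exports.legal.contracts]
+        pipes = ["extract_clause"]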
+
+    Args:
+        manifest: The manifest model to serialize
+
+    Returns:
+        A TOML-formatted string
+    """
+    doc = tomlkit.document()
+
+    # [package] section
+    package_table = tomlkit.table()
+    package_table.add("address", manifest.address)
+    if manifest.display_name is not None:
+        package_table.add("display_name", manifest.display_name)
+    package_table.add("version", manifest.version)
+    package_table.add("description", manifest.description)
+    if manifest.authors:
+        package_table.add("authors", manifest.authors)
+    if manifest.license is not None:
+        package_table.add("license", manifest.license)
+    if manifest.mthds_version is not None:
+        package_table.add("mthds_version", manifest.mthds_version)
+    doc.add("package", package_table)
+
+    # [dependencies] section
+    if manifest.dependencies:
+        doc.add(tomlkit.nl())
+        deps_table = tomlkit.table()
+        for dep in manifest.dependencies:
+            dep_table = tomlkit.inline_table()
+            dep_table.append("address", dep.address)
+            dep_table.append("version", dep.version)
+            if dep.path is not None:
+                dep_table.append("path", dep.path)
+            deps_table.add(dep.alias, dep_table)
+        doc.add("dependencies", deps_table)
+
+    # [exports] section: build nested tables from dotted domain paths
+    if manifest.exports:
+        doc.add(tomlkit.nl())
+        exports_table = tomlkit.table(is_super_table=True)
+
+        for domain_export in manifest.exports:
+            segments = domain_export.domain_path.split(".")
+            # Navigate/create nested tables
+            current: Any = exports_table
+            for segment in segments:
+                if segment not in current:
+                    current.add(segment, tomlkit.table())
+                current = current[segment]
+            current.add("pipes", domain_export.pipes)
+
+        doc.add("exports", exports_table)
+
+    return tomlkit.dumps(doc)  # type: ignore[arg-type]
diff --git a/pipelex/core/packages/package_cache.py b/pipelex/core/packages/package_cache.py
new file mode 100644
index 000000000..502450cc8
--- /dev/null
+++ b/pipelex/core/packages/package_cache.py
@@ -0,0 +1,140 @@
+"""Local package cache for fetched remote MTHDS dependencies.
+
+Cache layout: ``{cache_root}/{address}/{version}/``
+(e.g. ``~/.mthds/packages/github.com/org/repo/1.0.0/``).
+
+Uses a staging directory + atomic rename for safe writes.
+"""
+
+import shutil
+from pathlib import Path
+
+from pipelex.core.packages.exceptions import PackageCacheError
+
+
+def get_default_cache_root() -> Path:
+    """Return the default cache root directory.
+
+    Returns:
+        ``~/.mthds/packages``
+    """
+    return Path.home() / ".mthds" / "packages"
+
+
+def get_cached_package_path(
+    address: str,
+    version: str,
+    cache_root: Path | None = None,
+) -> Path:
+    """Compute the cache path for a package version.
+
+    Args:
+        address: Package address, e.g. ``github.com/org/repo``.
+        version: Resolved version string, e.g. ``1.0.0``.
+        cache_root: Override for the cache root directory.
+
+    Returns:
+        The directory path where this package version would be cached.
+    """
+    root = cache_root or get_default_cache_root()
+    return root / address / version
+
+
+def is_cached(
+    address: str,
+    version: str,
+    cache_root: Path | None = None,
+) -> bool:
+    """Check whether a package version exists in the cache.
+
+    A directory is considered cached if it exists and is non-empty.
+
+    Args:
+        address: Package address.
+        version: Resolved version string.
+        cache_root: Override for the cache root directory.
+
+    Returns:
+        True if the cached directory exists and is non-empty.
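+
+    Example (illustrative address and version)::
+
+        if not is_cached("github.com/acme/scoring", "1.2.0"):
+            ...  # fetch the package, then store_in_cache(...) it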
+ """ + pkg_path = get_cached_package_path(address, version, cache_root) + if not pkg_path.is_dir(): + return False + return any(pkg_path.iterdir()) + + +def store_in_cache( + source_dir: Path, + address: str, + version: str, + cache_root: Path | None = None, +) -> Path: + """Copy a package directory into the cache. + + Uses a staging directory (``{path}.staging``) and an atomic rename for + safe writes. Removes the ``.git/`` subdirectory from the cached copy. + + Args: + source_dir: The directory to copy from (e.g. a fresh clone). + address: Package address. + version: Resolved version string. + cache_root: Override for the cache root directory. + + Returns: + The final cache path. + + Raises: + PackageCacheError: If copying or renaming fails. + """ + final_path = get_cached_package_path(address, version, cache_root) + staging_path = final_path.parent / f"{final_path.name}.staging" + + try: + # Clean up any leftover staging dir + if staging_path.exists(): + shutil.rmtree(staging_path) + + # Copy source into staging + shutil.copytree(source_dir, staging_path) + + # Remove .git/ from the staged copy + git_dir = staging_path / ".git" + if git_dir.exists(): + shutil.rmtree(git_dir) + + # Ensure parent exists and perform atomic rename + final_path.parent.mkdir(parents=True, exist_ok=True) + if final_path.exists(): + shutil.rmtree(final_path) + staging_path.rename(final_path) + + except OSError as exc: + # Clean up staging on failure + if staging_path.exists(): + shutil.rmtree(staging_path, ignore_errors=True) + msg = f"Failed to store package '{address}@{version}' in cache: {exc}" + raise PackageCacheError(msg) from exc + + return final_path + + +def remove_cached_package( + address: str, + version: str, + cache_root: Path | None = None, +) -> bool: + """Remove a cached package version. + + Args: + address: Package address. + version: Resolved version string. + cache_root: Override for the cache root directory. + + Returns: + True if the directory existed and was removed, False otherwise. + """ + pkg_path = get_cached_package_path(address, version, cache_root) + if not pkg_path.exists(): + return False + shutil.rmtree(pkg_path) + return True diff --git a/pipelex/core/packages/publish_validation.py b/pipelex/core/packages/publish_validation.py new file mode 100644 index 000000000..f87fd03a8 --- /dev/null +++ b/pipelex/core/packages/publish_validation.py @@ -0,0 +1,459 @@ +"""Publish validation logic for MTHDS packages. + +Validates that a package is ready for distribution by checking manifest +completeness, export consistency, bundle validity, dependency pinning, +lock file freshness, and git tag readiness. 
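+
+Typical usage (illustrative)::
+
+    result = validate_for_publish(Path("."))
+    for issue in result.issues:
+        print(f"[{issue.level}] {issue.category}: {issue.message}")
+    if result.is_publishable:
+        ...  # proceed with tagging and publishing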
+""" + +import subprocess # noqa: S404 +from pathlib import Path + +from pydantic import BaseModel, ConfigDict, Field + +from pipelex.core.bundles.pipelex_bundle_blueprint import PipelexBundleBlueprint +from pipelex.core.packages.bundle_scanner import scan_bundles_for_domain_info +from pipelex.core.packages.dependency_resolver import collect_mthds_files +from pipelex.core.packages.discovery import MANIFEST_FILENAME +from pipelex.core.packages.exceptions import LockFileError, ManifestError, PublishValidationError +from pipelex.core.packages.lock_file import LOCK_FILENAME, parse_lock_file +from pipelex.core.packages.manifest import MTHDS_STANDARD_VERSION, MthdsPackageManifest +from pipelex.core.packages.manifest_parser import parse_methods_toml +from pipelex.core.packages.visibility import check_visibility_for_blueprints +from pipelex.tools.misc.semver import SemVerError, parse_constraint, parse_version, version_satisfies +from pipelex.tools.typing.pydantic_utils import empty_list_factory_of +from pipelex.types import StrEnum + + +class IssueLevel(StrEnum): + """Severity level for a publish validation issue.""" + + ERROR = "error" + WARNING = "warning" + + @property + def is_error(self) -> bool: + match self: + case IssueLevel.ERROR: + return True + case IssueLevel.WARNING: + return False + + @property + def is_warning(self) -> bool: + match self: + case IssueLevel.ERROR: + return False + case IssueLevel.WARNING: + return True + + +class IssueCategory(StrEnum): + """Category of a publish validation issue.""" + + MANIFEST = "manifest" + BUNDLE = "bundle" + EXPORT = "export" + DEPENDENCY = "dependency" + LOCK_FILE = "lock_file" + GIT = "git" + VISIBILITY = "visibility" + + +class PublishValidationIssue(BaseModel): + """A single validation issue found during publish readiness check.""" + + model_config = ConfigDict(frozen=True) + + level: IssueLevel = Field(strict=False) + category: IssueCategory = Field(strict=False) + message: str + suggestion: str | None = None + + +class PublishValidationResult(BaseModel): + """Aggregated result of publish validation.""" + + model_config = ConfigDict(frozen=True) + + issues: list[PublishValidationIssue] = Field(default_factory=empty_list_factory_of(PublishValidationIssue)) + package_version: str | None = None + + @property + def is_publishable(self) -> bool: + """Package is publishable if there are no ERROR-level issues.""" + return not any(issue.level.is_error for issue in self.issues) + + +# --------------------------------------------------------------------------- +# Private validation helpers +# --------------------------------------------------------------------------- + + +def _check_manifest_exists(package_root: Path) -> tuple[MthdsPackageManifest | None, list[PublishValidationIssue]]: + """Check that METHODS.toml exists and parses successfully. 
+
+    Returns:
+        Tuple of (parsed manifest or None, list of issues)
+    """
+    manifest_path = package_root / MANIFEST_FILENAME
+    if not manifest_path.is_file():
+        return None, [
+            PublishValidationIssue(
+                level=IssueLevel.ERROR,
+                category=IssueCategory.MANIFEST,
+                message=f"{MANIFEST_FILENAME} not found in {package_root}",
+                suggestion=f"Create a {MANIFEST_FILENAME} with 'pipelex pkg init'",
+            )
+        ]
+
+    content = manifest_path.read_text(encoding="utf-8")
+    try:
+        manifest = parse_methods_toml(content)
+    except ManifestError as exc:
+        return None, [
+            PublishValidationIssue(
+                level=IssueLevel.ERROR,
+                category=IssueCategory.MANIFEST,
+                message=f"{MANIFEST_FILENAME} parse error: {exc.message}",
+            )
+        ]
+
+    return manifest, []
+
+
+def _check_manifest_fields(manifest: MthdsPackageManifest) -> list[PublishValidationIssue]:
+    """Check manifest field completeness (authors, license).
+
+    Note: address, version, and description are validated by Pydantic validators
+    in MthdsPackageManifest during parse_methods_toml(). If parsing succeeded,
+    those fields are guaranteed valid, so there is no need to re-check them here.
+    """
+    issues: list[PublishValidationIssue] = []
+
+    if not manifest.authors:
+        issues.append(
+            PublishValidationIssue(
+                level=IssueLevel.WARNING,
+                category=IssueCategory.MANIFEST,
+                message="No authors specified",
+                suggestion='Add authors = ["Your Name"] to [package] in METHODS.toml',
+            )
+        )
+
+    if not manifest.license:
+        issues.append(
+            PublishValidationIssue(
+                level=IssueLevel.WARNING,
+                category=IssueCategory.MANIFEST,
+                message="No license specified",
+                suggestion='Add license = "MIT" (or other) to [package] in METHODS.toml',
+            )
+        )
+
+    return issues
+
+
+def _check_mthds_version(manifest: MthdsPackageManifest) -> list[PublishValidationIssue]:
+    """Check that mthds_version, if specified, is parseable and satisfiable by the current standard version."""
+    issues: list[PublishValidationIssue] = []
+    if manifest.mthds_version is None:
+        return issues
+    try:
+        constraint = parse_constraint(manifest.mthds_version)
+    except SemVerError:
+        issues.append(
+            PublishValidationIssue(
+                level=IssueLevel.ERROR,
+                category=IssueCategory.MANIFEST,
+                message=f"mthds_version constraint '{manifest.mthds_version}' is not parseable by the semver engine",
+                suggestion="Use a valid version constraint (e.g. '1.0.0', '^1.0.0', '>=1.0.0')",
+            )
+        )
+        return issues
+
+    # Check satisfiability against the current MTHDS standard version
+    current_version = parse_version(MTHDS_STANDARD_VERSION)
+    if not version_satisfies(current_version, constraint):
+        issues.append(
+            PublishValidationIssue(
+                level=IssueLevel.WARNING,
+                category=IssueCategory.MANIFEST,
+                message=(
+                    f"mthds_version constraint '{manifest.mthds_version}' is not satisfied by "
+                    f"the current MTHDS standard version '{MTHDS_STANDARD_VERSION}'"
+                ),
+                suggestion="Verify this is intentional if targeting a future standard version",
+            )
+        )
+    return issues
+
+
+def _check_bundles(
+    package_root: Path,
+) -> tuple[dict[str, list[str]], list[PipelexBundleBlueprint], list[PublishValidationIssue]]:
+    """Check that .mthds files exist and parse without error.
+ + Returns: + Tuple of (domain_pipes mapping, parsed blueprints, list of issues) + """ + issues: list[PublishValidationIssue] = [] + + mthds_files = collect_mthds_files(package_root) + if not mthds_files: + issues.append( + PublishValidationIssue( + level=IssueLevel.ERROR, + category=IssueCategory.BUNDLE, + message="No .mthds files found in package", + suggestion="Add at least one .mthds bundle file", + ) + ) + return {}, [], issues + + domain_pipes, _domain_main_pipes, blueprints, scan_errors = scan_bundles_for_domain_info(mthds_files) + + for error in scan_errors: + issues.append( + PublishValidationIssue( + level=IssueLevel.ERROR, + category=IssueCategory.BUNDLE, + message=f"Bundle parse error: {error}", + ) + ) + + return domain_pipes, blueprints, issues + + +def _check_exports(manifest: MthdsPackageManifest, domain_pipes: dict[str, list[str]]) -> list[PublishValidationIssue]: + """Check that exported pipes actually exist in scanned bundles.""" + issues: list[PublishValidationIssue] = [] + + for domain_export in manifest.exports: + domain_path = domain_export.domain_path + actual_pipes = set(domain_pipes.get(domain_path, [])) + + for pipe_code in domain_export.pipes: + if pipe_code not in actual_pipes: + issues.append( + PublishValidationIssue( + level=IssueLevel.ERROR, + category=IssueCategory.EXPORT, + message=f"Exported pipe '{pipe_code}' in domain '{domain_path}' not found in bundles", + suggestion=f"Remove '{pipe_code}' from [exports.{domain_path}] or add it to a .mthds file", + ) + ) + + return issues + + +def _check_visibility(manifest: MthdsPackageManifest, blueprints: list[PipelexBundleBlueprint]) -> list[PublishValidationIssue]: + """Check cross-domain visibility rules using already-parsed blueprints.""" + issues: list[PublishValidationIssue] = [] + + visibility_errors = check_visibility_for_blueprints(manifest, blueprints) + for vis_error in visibility_errors: + issues.append( + PublishValidationIssue( + level=IssueLevel.ERROR, + category=IssueCategory.VISIBILITY, + message=vis_error.message, + ) + ) + + return issues + + +def _check_dependencies(manifest: MthdsPackageManifest) -> list[PublishValidationIssue]: + """Check that dependencies have pinned versions (not wildcard *).""" + issues: list[PublishValidationIssue] = [] + + for dep in manifest.dependencies: + if dep.version == "*": + issues.append( + PublishValidationIssue( + level=IssueLevel.WARNING, + category=IssueCategory.DEPENDENCY, + message=f"Dependency '{dep.alias}' has wildcard version '*'", + suggestion=f"Pin '{dep.alias}' to a specific version (e.g. 
'1.0.0' or '^1.0.0')", + ) + ) + + return issues + + +def _check_lock_file(manifest: MthdsPackageManifest, package_root: Path) -> list[PublishValidationIssue]: + """Check lock file existence and consistency for remote dependencies.""" + issues: list[PublishValidationIssue] = [] + + remote_deps = [dep for dep in manifest.dependencies if dep.path is None] + if not remote_deps: + return issues + + lock_path = package_root / LOCK_FILENAME + if not lock_path.is_file(): + issues.append( + PublishValidationIssue( + level=IssueLevel.ERROR, + category=IssueCategory.LOCK_FILE, + message=f"{LOCK_FILENAME} not found but package has remote dependencies", + suggestion="Run 'pipelex pkg lock' to generate the lock file", + ) + ) + return issues + + # Parse lock file and compare addresses + content = lock_path.read_text(encoding="utf-8") + try: + lock_file = parse_lock_file(content) + except LockFileError as exc: + issues.append( + PublishValidationIssue( + level=IssueLevel.ERROR, + category=IssueCategory.LOCK_FILE, + message=f"Failed to parse {LOCK_FILENAME}: {exc}", + ) + ) + return issues + + remote_addresses = {dep.address for dep in remote_deps} + locked_addresses = set(lock_file.packages.keys()) + + missing_from_lock = remote_addresses - locked_addresses + for address in sorted(missing_from_lock): + issues.append( + PublishValidationIssue( + level=IssueLevel.WARNING, + category=IssueCategory.LOCK_FILE, + message=f"Remote dependency '{address}' not found in {LOCK_FILENAME}", + suggestion="Run 'pipelex pkg lock' to update the lock file", + ) + ) + + return issues + + +def _check_git(manifest: MthdsPackageManifest, package_root: Path) -> list[PublishValidationIssue]: + """Check git working directory status and tag availability.""" + issues: list[PublishValidationIssue] = [] + + # Check working directory is clean + try: + result = subprocess.run( + ["git", "status", "--porcelain"], # noqa: S607 + capture_output=True, + text=True, + check=True, + timeout=10, + cwd=package_root, + ) + if result.stdout.strip(): + issues.append( + PublishValidationIssue( + level=IssueLevel.WARNING, + category=IssueCategory.GIT, + message="Git working directory has uncommitted changes", + suggestion="Commit or stash changes before publishing", + ) + ) + except (FileNotFoundError, subprocess.CalledProcessError, subprocess.TimeoutExpired): + issues.append( + PublishValidationIssue( + level=IssueLevel.WARNING, + category=IssueCategory.GIT, + message="Could not check git status (git not available or not a git repository)", + ) + ) + return issues + + # Check tag does not already exist + version_tag = f"v{manifest.version}" + try: + result = subprocess.run( # noqa: S603 + ["git", "tag", "-l", version_tag], # noqa: S607 + capture_output=True, + text=True, + check=True, + timeout=10, + cwd=package_root, + ) + if result.stdout.strip(): + issues.append( + PublishValidationIssue( + level=IssueLevel.ERROR, + category=IssueCategory.GIT, + message=f"Git tag '{version_tag}' already exists", + suggestion=f"Bump the version in {MANIFEST_FILENAME} or delete the existing tag", + ) + ) + except (FileNotFoundError, subprocess.CalledProcessError, subprocess.TimeoutExpired): + issues.append( + PublishValidationIssue( + level=IssueLevel.WARNING, + category=IssueCategory.GIT, + message=f"Could not verify whether git tag '{version_tag}' already exists", + suggestion="Manually check existing tags with `git tag -l` before publishing", + ) + ) + + return issues + + +# --------------------------------------------------------------------------- +# Public 
API +# --------------------------------------------------------------------------- + + +def validate_for_publish(package_root: Path, check_git: bool = True) -> PublishValidationResult: + """Validate a package's readiness for distribution. + + Runs all validation checks and returns an aggregated result. + + Args: + package_root: Path to the package root directory + check_git: Whether to run git-related checks (disable in tests without git repos) + + Returns: + PublishValidationResult with all issues found + + Raises: + PublishValidationError: If the package root does not exist + """ + if not package_root.is_dir(): + msg = f"Package root '{package_root}' does not exist or is not a directory" + raise PublishValidationError(msg) + + all_issues: list[PublishValidationIssue] = [] + + # 1. Check manifest exists and parses + manifest, manifest_issues = _check_manifest_exists(package_root) + all_issues.extend(manifest_issues) + + if manifest is None: + return PublishValidationResult(issues=all_issues, package_version=None) + + # 2-6. Check manifest fields + all_issues.extend(_check_manifest_fields(manifest)) + all_issues.extend(_check_mthds_version(manifest)) + + # 7-8. Check bundles exist and parse + domain_pipes, blueprints, bundle_issues = _check_bundles(package_root) + all_issues.extend(bundle_issues) + + # 9. Check exports consistency + all_issues.extend(_check_exports(manifest, domain_pipes)) + + # 10. Check visibility rules + if blueprints: + all_issues.extend(_check_visibility(manifest, blueprints)) + + # 11. Check dependency pinning + all_issues.extend(_check_dependencies(manifest)) + + # 12-13. Check lock file + all_issues.extend(_check_lock_file(manifest, package_root)) + + # 14-15. Check git status + if check_git: + all_issues.extend(_check_git(manifest, package_root)) + + return PublishValidationResult(issues=all_issues, package_version=manifest.version) diff --git a/pipelex/core/packages/vcs_resolver.py b/pipelex/core/packages/vcs_resolver.py new file mode 100644 index 000000000..8ebe5909e --- /dev/null +++ b/pipelex/core/packages/vcs_resolver.py @@ -0,0 +1,159 @@ +# pyright: reportUnknownMemberType=false, reportUnknownVariableType=false, reportUnknownParameterType=false, reportUnknownArgumentType=false +"""Git-based VCS operations for remote dependency fetching. + +Maps package addresses to clone URLs, lists remote version tags, selects +versions via MVS, and clones at a specific tag. +""" + +import subprocess # noqa: S404 +from pathlib import Path + +from semantic_version import Version # type: ignore[import-untyped] + +from pipelex.core.packages.exceptions import VCSFetchError, VersionResolutionError +from pipelex.tools.misc.semver import parse_constraint, parse_version_tag, select_minimum_version + + +def address_to_clone_url(address: str) -> str: + """Map a package address to a git clone URL. + + Prepends ``https://`` and appends ``.git`` (unless already present). + + Args: + address: Package address, e.g. ``github.com/org/repo``. + + Returns: + The HTTPS clone URL, e.g. ``https://github.com/org/repo.git``. + """ + url = f"https://{address}" + if not url.endswith(".git"): + url = f"{url}.git" + return url + + +def list_remote_version_tags(clone_url: str) -> list[tuple[Version, str]]: + """List remote git tags that are valid semver versions. + + Runs ``git ls-remote --tags `` and parses the output, filtering + through :func:`parse_version_tag`. Dereferenced tag entries (``^{}``) + are skipped. + + Args: + clone_url: The git clone URL to query. 
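+            Output lines from ``git ls-remote`` are tab-separated, e.g.
+            a line ending in ``refs/tags/v1.0.0`` (illustrative), which
+            typically parses to version ``1.0.0``.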
+ + Returns: + List of ``(Version, original_tag_name)`` tuples. + + Raises: + VCSFetchError: If the git command fails or git is not installed. + """ + try: + result = subprocess.run( # noqa: S603 + ["git", "ls-remote", "--tags", clone_url], # noqa: S607 + capture_output=True, + text=True, + check=True, + timeout=60, + ) + except FileNotFoundError as exc: + msg = "git is not installed or not found on PATH" + raise VCSFetchError(msg) from exc + except subprocess.CalledProcessError as exc: + msg = f"Failed to list remote tags from '{clone_url}': {exc.stderr.strip()}" + raise VCSFetchError(msg) from exc + except subprocess.TimeoutExpired as exc: + msg = f"Timed out listing remote tags from '{clone_url}'" + raise VCSFetchError(msg) from exc + + version_tags: list[tuple[Version, str]] = [] + for line in result.stdout.strip().splitlines(): + if not line.strip(): + continue + parts = line.split("\t") + if len(parts) < 2: + continue + ref = parts[1] + + # Skip dereferenced tags + if ref.endswith("^{}"): + continue + + # Extract tag name from refs/tags/... + tag_name = ref.removeprefix("refs/tags/") + version = parse_version_tag(tag_name) + if version is not None: + version_tags.append((version, tag_name)) + + return version_tags + + +def resolve_version_from_tags( + version_tags: list[tuple[Version, str]], + version_constraint: str, +) -> tuple[Version, str]: + """Select the minimum version matching a constraint from a list of tags. + + Uses :func:`parse_constraint` and :func:`select_minimum_version` from the + semver module (MVS strategy). + + Args: + version_tags: List of ``(Version, original_tag_name)`` tuples. + version_constraint: The constraint string, e.g. ``^1.0.0``. + + Returns: + Tuple of ``(selected_version, original_tag_name)``. + + Raises: + VersionResolutionError: If no version satisfies the constraint. + """ + if not version_tags: + msg = f"No version tags available to satisfy constraint '{version_constraint}'" + raise VersionResolutionError(msg) + + constraint = parse_constraint(version_constraint) + versions = [entry[0] for entry in version_tags] + selected = select_minimum_version(versions, constraint) + + if selected is None: + available_str = ", ".join(str(entry[0]) for entry in sorted(version_tags)) + msg = f"No version satisfying '{version_constraint}' found among: {available_str}" + raise VersionResolutionError(msg) + + # Find the corresponding tag name + for ver, tag_name in version_tags: + if ver == selected: + return (selected, tag_name) + + # Unreachable since selected came from versions list, but satisfy type checker + msg = f"Internal error: selected version {selected} not found in tag list" + raise VersionResolutionError(msg) + + +def clone_at_version(clone_url: str, version_tag: str, destination: Path) -> None: + """Clone a git repository at a specific tag with depth 1. + + Args: + clone_url: The git clone URL. + version_tag: The tag to check out (e.g. ``v1.0.0``). + destination: The local directory to clone into. + + Raises: + VCSFetchError: If the clone operation fails. 
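+
+    Example of the full fetch flow with this module (illustrative address)::
+
+        url = address_to_clone_url("github.com/acme/scoring")
+        tags = list_remote_version_tags(url)
+        version, tag_name = resolve_version_from_tags(tags, "^1.0.0")
+        clone_at_version(url, tag_name, Path("/tmp/scoring"))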
+    """
+    try:
+        subprocess.run(  # noqa: S603
+            ["git", "clone", "--depth", "1", "--branch", version_tag, clone_url, str(destination)],  # noqa: S607
+            capture_output=True,
+            text=True,
+            check=True,
+            timeout=120,
+        )
+    except FileNotFoundError as exc:
+        msg = "git is not installed or not found on PATH"
+        raise VCSFetchError(msg) from exc
+    except subprocess.CalledProcessError as exc:
+        msg = f"Failed to clone '{clone_url}' at tag '{version_tag}': {exc.stderr.strip()}"
+        raise VCSFetchError(msg) from exc
+    except subprocess.TimeoutExpired as exc:
+        msg = f"Timed out cloning '{clone_url}' at tag '{version_tag}'"
+        raise VCSFetchError(msg) from exc
diff --git a/pipelex/core/packages/visibility.py b/pipelex/core/packages/visibility.py
new file mode 100644
index 000000000..2357c597c
--- /dev/null
+++ b/pipelex/core/packages/visibility.py
@@ -0,0 +1,229 @@
+from pydantic import BaseModel, ConfigDict
+
+from pipelex import log
+from pipelex.core.bundles.pipelex_bundle_blueprint import PipelexBundleBlueprint
+from pipelex.core.packages.manifest import RESERVED_DOMAINS, MthdsPackageManifest, is_reserved_domain_path
+from pipelex.core.qualified_ref import QualifiedRef, QualifiedRefError
+from pipelex.pipe_controllers.condition.special_outcome import SpecialOutcome
+
+
+class VisibilityError(BaseModel):
+    """A single visibility violation."""
+
+    model_config = ConfigDict(frozen=True)
+
+    pipe_ref: str
+    source_domain: str
+    target_domain: str
+    context: str
+    message: str
+
+
+class PackageVisibilityChecker:
+    """Checks cross-domain pipe visibility against a manifest's exports.
+
+    If no manifest is provided, all pipes are considered public (backward compat).
+    """
+
+    def __init__(
+        self,
+        manifest: MthdsPackageManifest | None,
+        bundles: list[PipelexBundleBlueprint],
+    ):
+        self._manifest = manifest
+        self._bundles = bundles
+
+        # Build lookup: exported_pipes[domain_path] = set of pipe codes
+        self._exported_pipes: dict[str, set[str]] = {}
+        if manifest:
+            for domain_export in manifest.exports:
+                self._exported_pipes[domain_export.domain_path] = set(domain_export.pipes)
+
+        # Build lookup: main_pipes[domain_path] = main_pipe code (auto-exported)
+        self._main_pipes: dict[str, str] = {}
+        for bundle in bundles:
+            if bundle.main_pipe:
+                existing = self._main_pipes.get(bundle.domain)
+                if existing and existing != bundle.main_pipe:
+                    log.warning(f"Conflicting main_pipe for domain '{bundle.domain}': '{existing}' vs '{bundle.main_pipe}', keeping the first value")
+                else:
+                    self._main_pipes[bundle.domain] = bundle.main_pipe
+
+    def is_pipe_accessible_from(self, pipe_ref: QualifiedRef, source_domain: str) -> bool:
+        """Check if a domain-qualified pipe ref is accessible from source_domain.
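+
+        For instance (illustrative), if the manifest exports only
+        ``compute_score`` from domain ``scoring``, then ``scoring.compute_score``
+        is accessible from any domain, while ``scoring.helper_pipe`` is only
+        accessible from within ``scoring`` itself.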
+
+        Args:
+            pipe_ref: The parsed pipe reference
+            source_domain: The domain making the reference
+
+        Returns:
+            True if the pipe is accessible
+        """
+        # No manifest -> all pipes public
+        if self._manifest is None:
+            return True
+
+        # Bare ref -> always allowed (no domain check)
+        if not pipe_ref.is_qualified:
+            return True
+
+        # Same-domain ref -> always allowed
+        if pipe_ref.is_local_to(source_domain):
+            return True
+
+        target_domain = pipe_ref.domain_path
+        assert target_domain is not None
+        pipe_code = pipe_ref.local_code
+
+        # Check if it's in exports
+        exported = self._exported_pipes.get(target_domain, set())
+        if pipe_code in exported:
+            return True
+
+        # Check if it's a main_pipe (auto-exported)
+        main_pipe = self._main_pipes.get(target_domain)
+        return bool(main_pipe and pipe_code == main_pipe)
+
+    def validate_all_pipe_references(self) -> list[VisibilityError]:
+        """Validate all cross-domain pipe refs across all bundles.
+
+        Returns:
+            List of VisibilityError for each violation found
+        """
+        # No manifest -> no violations
+        if self._manifest is None:
+            return []
+
+        errors: list[VisibilityError] = []
+        special_outcomes = SpecialOutcome.value_list()
+
+        for bundle in self._bundles:
+            pipe_refs = bundle.collect_pipe_references()
+            for pipe_ref_str, context in pipe_refs:
+                # Skip special outcomes
+                if pipe_ref_str in special_outcomes:
+                    continue
+
+                # Try to parse as pipe ref
+                try:
+                    ref = QualifiedRef.parse_pipe_ref(pipe_ref_str)
+                except QualifiedRefError:
+                    continue
+
+                if not self.is_pipe_accessible_from(ref, bundle.domain):
+                    target_domain = ref.domain_path or ""
+                    msg = (
+                        f"Pipe '{pipe_ref_str}' referenced in {context} (domain '{bundle.domain}') "
+                        f"is not exported by domain '{target_domain}'. "
+                        f"Add it to [exports.{target_domain}] pipes in METHODS.toml."
+                    )
+                    errors.append(
+                        VisibilityError(
+                            pipe_ref=pipe_ref_str,
+                            source_domain=bundle.domain,
+                            target_domain=target_domain,
+                            context=context,
+                            message=msg,
+                        )
+                    )
+
+        return errors
+
+    def validate_cross_package_references(self) -> list[VisibilityError]:
+        """Validate cross-package references (using '->' syntax).
+
+        Checks that:
+        - If a ref contains '->' and the alias IS in dependencies -> log an informational message (not an error)
+        - If a ref contains '->' and the alias is NOT in dependencies -> error
+
+        Returns:
+            List of VisibilityError for unknown dependency aliases
+        """
+        if self._manifest is None:
+            return []
+
+        # Build alias lookup from manifest dependencies
+        known_aliases: set[str] = {dep.alias for dep in self._manifest.dependencies}
+
+        errors: list[VisibilityError] = []
+
+        for bundle in self._bundles:
+            pipe_refs = bundle.collect_pipe_references()
+            for pipe_ref_str, context in pipe_refs:
+                if not QualifiedRef.has_cross_package_prefix(pipe_ref_str):
+                    continue
+
+                alias, _remainder = QualifiedRef.split_cross_package_ref(pipe_ref_str)
+
+                if alias in known_aliases:
+                    # Known alias -> informational (cross-package resolution is active)
+                    log.info(
+                        f"Cross-package reference '{pipe_ref_str}' in {context} (domain '{bundle.domain}'): alias '{alias}' is a known dependency."
+                    )
+                else:
+                    # Unknown alias -> error
+                    msg = (
+                        f"Cross-package reference '{pipe_ref_str}' in {context} "
+                        f"(domain '{bundle.domain}'): alias '{alias}' is not declared "
+                        "in [dependencies] of METHODS.toml."
+ ) + errors.append( + VisibilityError( + pipe_ref=pipe_ref_str, + source_domain=bundle.domain, + target_domain=alias, + context=context, + message=msg, + ) + ) + + return errors + + def validate_reserved_domains(self) -> list[VisibilityError]: + """Check that no bundle declares a domain starting with a reserved segment. + + Returns: + List of VisibilityError for each bundle using a reserved domain + """ + errors: list[VisibilityError] = [] + + for bundle in self._bundles: + if is_reserved_domain_path(bundle.domain): + first_segment = bundle.domain.split(".")[0] + msg = ( + f"Bundle domain '{bundle.domain}' uses reserved domain '{first_segment}'. " + f"Reserved domains ({', '.join(sorted(RESERVED_DOMAINS))}) cannot be used in user packages." + ) + errors.append( + VisibilityError( + pipe_ref="", + source_domain=bundle.domain, + target_domain=first_segment, + context="bundle domain declaration", + message=msg, + ) + ) + + return errors + + +def check_visibility_for_blueprints( + manifest: MthdsPackageManifest | None, + blueprints: list[PipelexBundleBlueprint], +) -> list[VisibilityError]: + """Convenience function: check visibility for a set of blueprints. + + Validates both intra-package cross-domain visibility and cross-package references. + + Args: + manifest: The package manifest (None means all-public) + blueprints: The bundle blueprints to check + + Returns: + List of visibility errors + """ + checker = PackageVisibilityChecker(manifest=manifest, bundles=blueprints) + errors = checker.validate_reserved_domains() + errors.extend(checker.validate_all_pipe_references()) + errors.extend(checker.validate_cross_package_references()) + return errors diff --git a/pipelex/core/pipes/pipe_output.py b/pipelex/core/pipes/pipe_output.py index cd8c837cb..23fa91b3f 100644 --- a/pipelex/core/pipes/pipe_output.py +++ b/pipelex/core/pipes/pipe_output.py @@ -1,8 +1,9 @@ from typing import TypeVar -from pydantic import BaseModel, Field +from mthds.models.pipe_output import PipeOutputAbstract +from pydantic import Field -from pipelex.core.memory.working_memory import DictWorkingMemory, WorkingMemory +from pipelex.core.memory.working_memory import WorkingMemory from pipelex.core.stuffs.html_content import HtmlContent from pipelex.core.stuffs.image_content import ImageContent from pipelex.core.stuffs.list_content import ListContent @@ -16,13 +17,7 @@ from pipelex.pipeline.pipeline_models import SpecialPipelineId -class DictPipeOutput(BaseModel): - working_memory: DictWorkingMemory - graph_spec: GraphSpec | None = None - pipeline_run_id: str - - -class PipeOutput(BaseModel): +class PipeOutput(PipeOutputAbstract[WorkingMemory]): working_memory: WorkingMemory = Field(default_factory=WorkingMemory) pipeline_run_id: str = Field(default=SpecialPipelineId.UNTITLED) graph_spec: GraphSpec | None = None diff --git a/pipelex/core/pipes/variable_multiplicity.py b/pipelex/core/pipes/variable_multiplicity.py index 101a9069d..3652fc889 100644 --- a/pipelex/core/pipes/variable_multiplicity.py +++ b/pipelex/core/pipes/variable_multiplicity.py @@ -8,7 +8,7 @@ VariableMultiplicity = bool | int -MUTLIPLICITY_PATTERN = r"^([a-zA-Z_][a-zA-Z0-9_]*(?:\.[a-zA-Z_][a-zA-Z0-9_]*)?)(?:\[(\d*)\])?$" +MUTLIPLICITY_PATTERN = r"^([a-zA-Z_][a-zA-Z0-9_]*(?:\.[a-zA-Z_][a-zA-Z0-9_]*)*)(?:\[(\d*)\])?$" class VariableMultiplicityResolution(BaseModel): @@ -77,8 +77,8 @@ def parse_concept_with_multiplicity(concept_ref_or_code: str) -> MultiplicityPar or if multiplicity is zero or negative (a pipe must produce at least one output) """ # Use strict 
pattern to validate identifier syntax - # Concept must start with letter/underscore, optional domain prefix, optional brackets - pattern = r"^([a-zA-Z_][a-zA-Z0-9_]*(?:\.[a-zA-Z_][a-zA-Z0-9_]*)?)(?:\[(\d*)\])?$" + # Concept must start with letter/underscore, with zero or more dotted domain segments, optional brackets + pattern = r"^([a-zA-Z_][a-zA-Z0-9_]*(?:\.[a-zA-Z_][a-zA-Z0-9_]*)*)(?:\[(\d*)\])?$" match = re.match(pattern, concept_ref_or_code) if not match: diff --git a/pipelex/core/qualified_ref.py b/pipelex/core/qualified_ref.py new file mode 100644 index 000000000..746944f5d --- /dev/null +++ b/pipelex/core/qualified_ref.py @@ -0,0 +1,187 @@ +from pydantic import BaseModel, ConfigDict + +from pipelex.tools.misc.string_utils import is_pascal_case, is_snake_case + + +class QualifiedRefError(ValueError): + """Raised when a qualified reference string is invalid.""" + + +class QualifiedRef(BaseModel): + """A domain-qualified reference to a concept or pipe. + + Concept ref: "legal.contracts.NonCompeteClause" -> domain_path="legal.contracts", local_code="NonCompeteClause" + Pipe ref: "scoring.compute_score" -> domain_path="scoring", local_code="compute_score" + Bare ref: "compute_score" -> domain_path=None, local_code="compute_score" + """ + + model_config = ConfigDict(frozen=True) + + domain_path: str | None = None + local_code: str + + @property + def is_qualified(self) -> bool: + return self.domain_path is not None + + @property + def full_ref(self) -> str: + if self.domain_path: + return f"{self.domain_path}.{self.local_code}" + return self.local_code + + @classmethod + def parse(cls, raw: str) -> "QualifiedRef": + """Split on last dot. No naming-convention check on local_code. + + Args: + raw: The raw reference string to parse + + Returns: + A QualifiedRef with domain_path and local_code + + Raises: + QualifiedRefError: If the raw string is empty, starts/ends with a dot, + or contains consecutive dots + """ + if not raw: + msg = "Qualified reference cannot be empty" + raise QualifiedRefError(msg) + if raw.startswith(".") or raw.endswith("."): + msg = f"Qualified reference '{raw}' must not start or end with a dot" + raise QualifiedRefError(msg) + if ".." in raw: + msg = f"Qualified reference '{raw}' must not contain consecutive dots" + raise QualifiedRefError(msg) + + if "." not in raw: + return cls(domain_path=None, local_code=raw) + + domain_path, local_code = raw.rsplit(".", maxsplit=1) + return cls(domain_path=domain_path, local_code=local_code) + + @classmethod + def parse_concept_ref(cls, raw: str) -> "QualifiedRef": + """Parse a concept ref. Validates domain_path segments are snake_case, local_code is PascalCase. + + Args: + raw: The raw concept reference string to parse + + Returns: + A QualifiedRef with validated domain_path and local_code + + Raises: + QualifiedRefError: If the ref is invalid + """ + ref = cls.parse(raw) + + if not is_pascal_case(ref.local_code): + msg = f"Concept code '{ref.local_code}' in reference '{raw}' must be PascalCase" + raise QualifiedRefError(msg) + + if ref.domain_path is not None: + for segment in ref.domain_path.split("."): + if not is_snake_case(segment): + msg = f"Domain segment '{segment}' in reference '{raw}' must be snake_case" + raise QualifiedRefError(msg) + + return ref + + @classmethod + def parse_pipe_ref(cls, raw: str) -> "QualifiedRef": + """Parse a pipe ref. Validates domain_path segments are snake_case, local_code is snake_case. 
+ + Args: + raw: The raw pipe reference string to parse + + Returns: + A QualifiedRef with validated domain_path and local_code + + Raises: + QualifiedRefError: If the ref is invalid + """ + ref = cls.parse(raw) + + if not is_snake_case(ref.local_code): + msg = f"Pipe code '{ref.local_code}' in reference '{raw}' must be snake_case" + raise QualifiedRefError(msg) + + if ref.domain_path is not None: + for segment in ref.domain_path.split("."): + if not is_snake_case(segment): + msg = f"Domain segment '{segment}' in reference '{raw}' must be snake_case" + raise QualifiedRefError(msg) + + return ref + + @classmethod + def from_domain_and_code(cls, domain_path: str, local_code: str) -> "QualifiedRef": + """Build from already-known parts. + + Args: + domain_path: The domain path (e.g. "legal.contracts") + local_code: The local code (e.g. "NonCompeteClause" or "compute_score") + + Returns: + A QualifiedRef + """ + return cls(domain_path=domain_path, local_code=local_code) + + def is_local_to(self, domain: str) -> bool: + """True if this ref belongs to the given domain (same domain or bare). + + Args: + domain: The domain to check against + + Returns: + True if this ref is local to the given domain + """ + if self.domain_path is None: + return True + return self.domain_path == domain + + def is_external_to(self, domain: str) -> bool: + """True if this ref belongs to a different domain. + + Args: + domain: The domain to check against + + Returns: + True if this ref is qualified and points to a different domain + """ + if self.domain_path is None: + return False + return self.domain_path != domain + + @staticmethod + def has_cross_package_prefix(raw: str) -> bool: + """Check if a raw reference string contains the cross-package '->' prefix. + + Cross-package references look like: 'alias->domain.pipe_code' + + Args: + raw: The raw reference string to check + + Returns: + True if the string contains '->' + """ + return "->" in raw + + @staticmethod + def split_cross_package_ref(raw: str) -> tuple[str, str]: + """Split a cross-package reference into alias and remainder. + + Args: + raw: The raw reference string like 'alias->domain.pipe_code' + + Returns: + Tuple of (alias, remainder) where remainder is 'domain.pipe_code' + + Raises: + QualifiedRefError: If the string does not contain '->' + """ + if "->" not in raw: + msg = f"Reference '{raw}' is not a cross-package reference (no '->' found)" + raise QualifiedRefError(msg) + parts = raw.split("->", maxsplit=1) + return parts[0], parts[1] diff --git a/pipelex/core/stuffs/stuff.py b/pipelex/core/stuffs/stuff.py index 3be9721d7..9a1c81ec4 100644 --- a/pipelex/core/stuffs/stuff.py +++ b/pipelex/core/stuffs/stuff.py @@ -2,7 +2,8 @@ from typing import Any, cast, get_args, get_origin from kajson import kajson -from pydantic import ConfigDict, ValidationError +from mthds.models.stuff import DictStuffAbstract, StuffAbstract +from pydantic import ValidationError from typing_extensions import override from pipelex import log @@ -23,14 +24,7 @@ from pipelex.tools.typing.pydantic_utils import CustomBaseModel, format_pydantic_validation_error -class Stuff(PrettyRenderable, CustomBaseModel): - model_config = ConfigDict(extra="forbid", strict=True) - - stuff_code: str - stuff_name: str | None = None - concept: Concept - content: StuffContent - +class Stuff(PrettyRenderable, CustomBaseModel, StuffAbstract[Concept, StuffContent]): def make_artefact(self) -> StuffArtefact: """Create a Jinja2-compatible artefact from this Stuff. 
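# A minimal runnable sketch of how the new reference parsing behaves, assuming
# only the APIs defined in qualified_ref.py and variable_multiplicity.py above;
# the example refs are invented.
import re

from pipelex.core.qualified_ref import QualifiedRef

ref = QualifiedRef.parse_concept_ref("legal.contracts.NonCompeteClause")
assert ref.domain_path == "legal.contracts"
assert ref.local_code == "NonCompeteClause"
assert ref.is_local_to("legal.contracts") and ref.is_external_to("scoring")

# Cross-package references carry an alias prefix separated by '->':
alias, remainder = QualifiedRef.split_cross_package_ref("pkg_alias->scoring.compute_score")
assert (alias, remainder) == ("pkg_alias", "scoring.compute_score")

# The relaxed multiplicity pattern now accepts multi-segment domain prefixes:
pattern = r"^([a-zA-Z_][a-zA-Z0-9_]*(?:\.[a-zA-Z_][a-zA-Z0-9_]*)*)(?:\[(\d*)\])?$"
match = re.match(pattern, "legal.contracts.Clause[3]")
assert match is not None
assert match.group(1) == "legal.contracts.Clause"
assert match.group(2) == "3"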
@@ -247,13 +241,9 @@ def pretty_print_stuff(self, title: str | None = None) -> None: self.content.pretty_print_content(title=title) -class DictStuff(CustomBaseModel): +class DictStuff(CustomBaseModel, DictStuffAbstract): """Stuff with content as dict[str, Any] instead of StuffContent. This is used for serialization where the content needs to be a plain dict. Has the exact same structure as Stuff but with dict content. """ - - model_config = ConfigDict(extra="forbid", strict=True) - concept: str - content: Any diff --git a/pipelex/core/stuffs/stuff_content.py b/pipelex/core/stuffs/stuff_content.py index 34067f0e0..56b670f07 100644 --- a/pipelex/core/stuffs/stuff_content.py +++ b/pipelex/core/stuffs/stuff_content.py @@ -1,7 +1,7 @@ -from abc import ABC from typing import Any, TypeVar, final from kajson import kajson +from mthds.models.stuff import StuffContentAbstract from rich.json import JSON from typing_extensions import override @@ -12,7 +12,7 @@ StuffContentType = TypeVar("StuffContentType", bound="StuffContent") -class StuffContent(PrettyRenderable, CustomBaseModel, ABC): +class StuffContent(PrettyRenderable, CustomBaseModel, StuffContentAbstract): @override def __str__(self) -> str: return self.rendered_plain() diff --git a/pipelex/core/stuffs/stuff_factory.py b/pipelex/core/stuffs/stuff_factory.py index 53e86c3c7..2fe782082 100644 --- a/pipelex/core/stuffs/stuff_factory.py +++ b/pipelex/core/stuffs/stuff_factory.py @@ -1,9 +1,9 @@ from typing import Any, cast import shortuuid +from mthds.models.pipeline_inputs import StuffContentOrData from pydantic import BaseModel, ValidationError, field_validator -from pipelex.client.protocol import StuffContentOrData from pipelex.core.concepts.concept import Concept from pipelex.core.concepts.concept_factory import ConceptFactory from pipelex.core.concepts.native.concept_native import NativeConceptCode @@ -110,7 +110,6 @@ def combine_stuffs( stuff_contents: dict[str, StuffContent], name: str | None = None, ) -> Stuff: - # TODO: Add unit tests for this method """Combine a dictionary of stuffs into a single stuff.""" the_subclass = get_class_registry().get_required_subclass(name=concept.structure_class_name, base_class=StuffContent) try: diff --git a/pipelex/exceptions.py b/pipelex/exceptions.py index c26c8704f..e53ac15e6 100644 --- a/pipelex/exceptions.py +++ b/pipelex/exceptions.py @@ -1,4 +1,3 @@ -from pipelex.client.exceptions import ClientAuthenticationError from pipelex.core.concepts.exceptions import ( ConceptCodeError, ConceptError, @@ -58,8 +57,6 @@ ) __all__ = [ - # from pipelex.client.exceptions - "ClientAuthenticationError", # from pipelex.core.domains.exceptions "DomainCodeError", # from pipelex.core.concepts.exceptions diff --git a/pipelex/graph/graph_tracer.py b/pipelex/graph/graph_tracer.py index 175f87d68..125273799 100644 --- a/pipelex/graph/graph_tracer.py +++ b/pipelex/graph/graph_tracer.py @@ -47,7 +47,7 @@ def __init__( self.metrics: dict[str, float] = {} self.error: ErrorSpec | None = None self.input_specs: list[IOSpec] = input_specs or [] - self.output_spec: IOSpec | None = None + self.output_specs: list[IOSpec] = [] def to_node_spec(self) -> NodeSpec: """Convert to immutable NodeSpec.""" @@ -59,9 +59,7 @@ def to_node_spec(self) -> NodeSpec: ) # Build NodeIOSpec from captured input/output specs - outputs: list[IOSpec] = [] - if self.output_spec is not None: - outputs = [self.output_spec] + outputs = list(self.output_specs) node_io = NodeIOSpec( inputs=self.input_specs, @@ -110,6 +108,11 @@ def __init__(self) -> None: # The 
batch_controller_node_id is tracked to ensure BATCH_AGGREGATE edges target the correct node # (the PipeBatch), not a parent controller that may later register as producer of the same stuff self._batch_aggregate_map: dict[str, tuple[str | None, list[tuple[str, int]]]] = {} + # Maps combined_stuff_code -> (parallel_controller_node_id, [(branch_stuff_code, branch_producer_node_id)]) + # Used to create PARALLEL_COMBINE edges from branch outputs to combined output + # The branch_producer_node_id is snapshotted at registration time, before register_controller_output + # overrides _stuff_producer_map to point branch stuff codes to the controller node + self._parallel_combine_map: dict[str, tuple[str, list[tuple[str, str]]]] = {} @property def is_active(self) -> bool: @@ -139,6 +142,7 @@ def setup( self._stuff_producer_map = {} self._batch_item_map = {} self._batch_aggregate_map = {} + self._parallel_combine_map = {} return GraphContext( graph_id=graph_id, @@ -164,6 +168,7 @@ def teardown(self) -> GraphSpec | None: self._generate_data_edges() self._generate_batch_item_edges() self._generate_batch_aggregate_edges() + self._generate_parallel_combine_edges() self._is_active = False @@ -187,6 +192,7 @@ def teardown(self) -> GraphSpec | None: self._stuff_producer_map = {} self._batch_item_map = {} self._batch_aggregate_map = {} + self._parallel_combine_map = {} return graph @@ -294,6 +300,26 @@ def _generate_batch_aggregate_edges(self) -> None: target_stuff_digest=output_list_stuff_code, ) + def _generate_parallel_combine_edges(self) -> None: + """Generate PARALLEL_COMBINE edges from branch output stuff nodes to the combined output stuff node. + + For each registered parallel combine, create edges from each branch output + to the combined output, showing how individual branch results are merged. + + Uses snapshotted branch producer node IDs captured during register_parallel_combine, + before register_controller_output overrides _stuff_producer_map. + """ + for combined_stuff_code, (parallel_controller_node_id, branch_entries) in self._parallel_combine_map.items(): + for branch_stuff_code, branch_producer_id in branch_entries: + if branch_producer_id != parallel_controller_node_id: + self.add_edge( + source_node_id=branch_producer_id, + target_node_id=parallel_controller_node_id, + edge_kind=EdgeKind.PARALLEL_COMBINE, + source_stuff_digest=branch_stuff_code, + target_stuff_digest=combined_stuff_code, + ) + @override def register_batch_item_extraction( self, @@ -348,6 +374,32 @@ def register_batch_aggregation( # Note: We keep the first batch_controller_node_id registered for this output list # (all items for the same output list should come from the same batch controller) + @override + def register_parallel_combine( + self, + combined_stuff_code: str, + branch_stuff_codes: list[str], + parallel_controller_node_id: str, + ) -> None: + """Register that branch outputs are combined into a single output in PipeParallel. + + Args: + combined_stuff_code: The stuff_code of the combined output. + branch_stuff_codes: The stuff_codes of the individual branch outputs. + parallel_controller_node_id: The node_id of the PipeParallel controller. + """ + if not self._is_active: + return + # Snapshot the current branch producers from _stuff_producer_map before + # register_controller_output overrides them to point to the controller node. + # This must be called BEFORE _register_branch_outputs_with_graph_tracer. 
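# A hypothetical call sequence (digests and spec_a invented) illustrating why
# this ordering matters for the snapshot taken below:
#
#     tracer.register_parallel_combine(
#         combined_stuff_code="d_combined",
#         branch_stuff_codes=["d_branch_a", "d_branch_b"],
#         parallel_controller_node_id="node_parallel_1",
#     )  # snapshots the sub-pipe producers of d_branch_a / d_branch_b
#     tracer.register_controller_output(node_id="node_parallel_1", output_spec=spec_a)
#     # from here on, _stuff_producer_map["d_branch_a"] points at the controller,
#     # so a snapshot taken now would lose the true branch producer.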
+ branch_entries: list[tuple[str, str]] = [] + for branch_code in branch_stuff_codes: + producer_id = self._stuff_producer_map.get(branch_code) + if producer_id: + branch_entries.append((branch_code, producer_id)) + self._parallel_combine_map[combined_stuff_code] = (parallel_controller_node_id, branch_entries) + @override def on_pipe_start( self, @@ -422,10 +474,45 @@ def on_pipe_end_success( # Store output spec and register in producer map for data flow tracking if output_spec is not None: - node_data.output_spec = output_spec - # Register this node as the producer of this stuff_code (digest) - if output_spec.digest: - self._stuff_producer_map[output_spec.digest] = node_id + # Skip pass-through outputs: if the output digest matches one of the node's + # input digests, the output is just the unchanged input flowing through + # (e.g., PipeParallel with add_each_output where main_stuff is the original input) + input_digests = {spec.digest for spec in node_data.input_specs if spec.digest is not None} + if output_spec.digest in input_digests: + # Pass-through: don't register as output or producer + pass + else: + node_data.output_specs.append(output_spec) + # Register this node as the producer of this stuff_code (digest) + if output_spec.digest: + self._stuff_producer_map[output_spec.digest] = node_id + + @override + def register_controller_output( + self, + node_id: str, + output_spec: IOSpec, + ) -> None: + """Register an additional output for a controller node. + + This allows controllers like PipeParallel to explicitly register their + branch outputs, overriding sub-pipe registrations in _stuff_producer_map + so that DATA edges flow from the controller to downstream consumers. + + Args: + node_id: The controller node ID. + output_spec: The IOSpec describing the output. + """ + if not self._is_active: + return + + node_data = self._nodes.get(node_id) + if node_data is None: + return + + node_data.output_specs.append(output_spec) + if output_spec.digest: + self._stuff_producer_map[output_spec.digest] = node_id @override def on_pipe_end_error( diff --git a/pipelex/graph/graph_tracer_manager.py b/pipelex/graph/graph_tracer_manager.py index f2cdf238b..3b4d770df 100644 --- a/pipelex/graph/graph_tracer_manager.py +++ b/pipelex/graph/graph_tracer_manager.py @@ -298,6 +298,27 @@ def add_edge( label=label, ) + def register_controller_output( + self, + graph_id: str, + node_id: str, + output_spec: IOSpec, + ) -> None: + """Register an additional output for a controller node. + + Args: + graph_id: The graph identifier. + node_id: The controller node ID. + output_spec: The IOSpec describing the output. + """ + tracer = self._get_tracer(graph_id) + if tracer is None: + return + tracer.register_controller_output( + node_id=node_id, + output_spec=output_spec, + ) + def register_batch_item_extraction( self, graph_id: str, @@ -353,3 +374,27 @@ def register_batch_aggregation( item_index=item_index, batch_controller_node_id=batch_controller_node_id, ) + + def register_parallel_combine( + self, + graph_id: str, + combined_stuff_code: str, + branch_stuff_codes: list[str], + parallel_controller_node_id: str, + ) -> None: + """Register that branch outputs are combined into a single output in PipeParallel. + + Args: + graph_id: The graph identifier. + combined_stuff_code: The stuff_code of the combined output. + branch_stuff_codes: The stuff_codes of the individual branch outputs. + parallel_controller_node_id: The node_id of the PipeParallel controller. 
+ """ + tracer = self._get_tracer(graph_id) + if tracer is None: + return + tracer.register_parallel_combine( + combined_stuff_code=combined_stuff_code, + branch_stuff_codes=branch_stuff_codes, + parallel_controller_node_id=parallel_controller_node_id, + ) diff --git a/pipelex/graph/graph_tracer_protocol.py b/pipelex/graph/graph_tracer_protocol.py index 213e342cd..adbb217a8 100644 --- a/pipelex/graph/graph_tracer_protocol.py +++ b/pipelex/graph/graph_tracer_protocol.py @@ -126,6 +126,22 @@ def add_edge( """ ... + def register_controller_output( + self, + node_id: str, + output_spec: IOSpec, + ) -> None: + """Register an additional output for a controller node. + + This allows controllers like PipeParallel to explicitly register their + branch outputs so that DATA edges flow from the controller to downstream consumers. + + Args: + node_id: The controller node ID. + output_spec: The IOSpec describing the output. + """ + ... + def register_batch_item_extraction( self, list_stuff_code: str, @@ -163,6 +179,24 @@ def register_batch_aggregation( """ ... + def register_parallel_combine( + self, + combined_stuff_code: str, + branch_stuff_codes: list[str], + parallel_controller_node_id: str, + ) -> None: + """Register that branch outputs are combined into a single output in PipeParallel. + + Creates PARALLEL_COMBINE edges from each branch output stuff node + to the combined output stuff node. + + Args: + combined_stuff_code: The stuff_code of the combined output. + branch_stuff_codes: The stuff_codes of the individual branch outputs. + parallel_controller_node_id: The node_id of the PipeParallel controller. + """ + ... + class GraphTracerNoOp(GraphTracerProtocol): """No-operation implementation of GraphTracerProtocol. @@ -235,6 +269,14 @@ def add_edge( ) -> None: pass + @override + def register_controller_output( + self, + node_id: str, + output_spec: IOSpec, + ) -> None: + pass + @override def register_batch_item_extraction( self, @@ -254,3 +296,12 @@ def register_batch_aggregation( batch_controller_node_id: str | None = None, ) -> None: pass + + @override + def register_parallel_combine( + self, + combined_stuff_code: str, + branch_stuff_codes: list[str], + parallel_controller_node_id: str, + ) -> None: + pass diff --git a/pipelex/graph/graphspec.py b/pipelex/graph/graphspec.py index a7d4c440c..66892c827 100644 --- a/pipelex/graph/graphspec.py +++ b/pipelex/graph/graphspec.py @@ -49,13 +49,21 @@ class EdgeKind(StrEnum): SELECTED_OUTCOME = "selected_outcome" BATCH_ITEM = "batch_item" # list → item extraction during batch iteration BATCH_AGGREGATE = "batch_aggregate" # items → output list aggregation + PARALLEL_COMBINE = "parallel_combine" # branch outputs → combined output in PipeParallel @property def is_data(self) -> bool: match self: case EdgeKind.DATA: return True - case EdgeKind.CONTROL | EdgeKind.CONTAINS | EdgeKind.SELECTED_OUTCOME | EdgeKind.BATCH_ITEM | EdgeKind.BATCH_AGGREGATE: + case ( + EdgeKind.CONTROL + | EdgeKind.CONTAINS + | EdgeKind.SELECTED_OUTCOME + | EdgeKind.BATCH_ITEM + | EdgeKind.BATCH_AGGREGATE + | EdgeKind.PARALLEL_COMBINE + ): return False @property @@ -63,7 +71,14 @@ def is_contains(self) -> bool: match self: case EdgeKind.CONTAINS: return True - case EdgeKind.CONTROL | EdgeKind.DATA | EdgeKind.SELECTED_OUTCOME | EdgeKind.BATCH_ITEM | EdgeKind.BATCH_AGGREGATE: + case ( + EdgeKind.CONTROL + | EdgeKind.DATA + | EdgeKind.SELECTED_OUTCOME + | EdgeKind.BATCH_ITEM + | EdgeKind.BATCH_AGGREGATE + | EdgeKind.PARALLEL_COMBINE + ): return False @property @@ -71,7 +86,7 @@ def 
is_selected_outcome(self) -> bool: match self: case EdgeKind.SELECTED_OUTCOME: return True - case EdgeKind.CONTROL | EdgeKind.DATA | EdgeKind.CONTAINS | EdgeKind.BATCH_ITEM | EdgeKind.BATCH_AGGREGATE: + case EdgeKind.CONTROL | EdgeKind.DATA | EdgeKind.CONTAINS | EdgeKind.BATCH_ITEM | EdgeKind.BATCH_AGGREGATE | EdgeKind.PARALLEL_COMBINE: return False @property @@ -79,7 +94,14 @@ def is_batch_item(self) -> bool: match self: case EdgeKind.BATCH_ITEM: return True - case EdgeKind.CONTROL | EdgeKind.DATA | EdgeKind.CONTAINS | EdgeKind.SELECTED_OUTCOME | EdgeKind.BATCH_AGGREGATE: + case ( + EdgeKind.CONTROL + | EdgeKind.DATA + | EdgeKind.CONTAINS + | EdgeKind.SELECTED_OUTCOME + | EdgeKind.BATCH_AGGREGATE + | EdgeKind.PARALLEL_COMBINE + ): return False @property @@ -87,7 +109,15 @@ def is_batch_aggregate(self) -> bool: match self: case EdgeKind.BATCH_AGGREGATE: return True - case EdgeKind.CONTROL | EdgeKind.DATA | EdgeKind.CONTAINS | EdgeKind.SELECTED_OUTCOME | EdgeKind.BATCH_ITEM: + case EdgeKind.CONTROL | EdgeKind.DATA | EdgeKind.CONTAINS | EdgeKind.SELECTED_OUTCOME | EdgeKind.BATCH_ITEM | EdgeKind.PARALLEL_COMBINE: + return False + + @property + def is_parallel_combine(self) -> bool: + match self: + case EdgeKind.PARALLEL_COMBINE: + return True + case EdgeKind.CONTROL | EdgeKind.DATA | EdgeKind.CONTAINS | EdgeKind.SELECTED_OUTCOME | EdgeKind.BATCH_ITEM | EdgeKind.BATCH_AGGREGATE: return False diff --git a/pipelex/graph/mermaidflow/mermaidflow_factory.py b/pipelex/graph/mermaidflow/mermaidflow_factory.py index b84a723fb..7a3159c05 100644 --- a/pipelex/graph/mermaidflow/mermaidflow_factory.py +++ b/pipelex/graph/mermaidflow/mermaidflow_factory.py @@ -13,6 +13,7 @@ from pipelex.graph.graph_analysis import GraphAnalysis from pipelex.graph.graph_config import GraphConfig from pipelex.graph.graphspec import ( + EdgeSpec, GraphSpec, NodeKind, NodeSpec, @@ -125,6 +126,23 @@ def make_from_graphspec( # This allows batch item stuffs to be placed inside their consumer's subgraph rendered_orphan_stuffs: set[str] = set() + # Build mapping of controller node_id → {digest: (name, concept)} for parallel_combine + # target stuffs. These are outputs of parallel controllers and should be rendered + # inside the controller's subgraph rather than as orphans at top level. + # We collect the stuff info from controller node outputs directly, because these + # stuffs may not be in stuff_registry (which skips controller nodes). 
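# With invented digests, the mapping built below could end up looking like:
#     {"node_parallel_1": {"d_combined": ("combined_result", "scoring.Score")}}
# i.e. controller node_id -> {digest: (stuff name, concept)}, filled in two passes.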
+ controller_output_stuffs: dict[str, dict[str, tuple[str, str | None]]] = {} + for edge in graph.edges: + if edge.kind.is_parallel_combine and edge.target_stuff_digest: + controller_output_stuffs.setdefault(edge.target, {})[edge.target_stuff_digest] = ("", None) + # Resolve names and concepts from the controller nodes' outputs + for controller_id, digest_map in controller_output_stuffs.items(): + controller_node = analysis.nodes_by_id.get(controller_id) + if controller_node: + for output_spec in controller_node.node_io.outputs: + if output_spec.digest and output_spec.digest in digest_map: + digest_map[output_spec.digest] = (output_spec.name, output_spec.concept) + # Render pipe nodes and their produced stuff within controller subgraphs lines.append("") lines.append(" %% Pipe and stuff nodes within controller subgraphs") @@ -141,6 +159,7 @@ subgraph_depths=subgraph_depths, show_stuff_codes=show_stuff_codes, rendered_orphan_stuffs=rendered_orphan_stuffs, + controller_output_stuffs=controller_output_stuffs, ) lines.extend(node_lines) @@ -199,6 +218,15 @@ ) lines.append(stuff_line) + # Build supplementary stuff info from all nodes (including controllers) + # This is needed for batch_aggregate target_stuff_digest which may not be in stuff_registry + # (GraphAnalysis.stuff_registry skips controller outputs) + all_stuff_info: dict[str, tuple[str, str | None]] = {} + for node in graph.nodes: + for output_spec in node.node_io.outputs: + if output_spec.digest and output_spec.digest not in all_stuff_info: + all_stuff_info[output_spec.digest] = (output_spec.name, output_spec.concept) + # Render edges: producer -> stuff lines.append("") lines.append(" %% Data flow edges: producer -> stuff -> consumer") @@ -220,26 +248,24 @@ lines.append(f" {cons_stuff_mermaid_id} --> {consumer_mermaid_id}") # Render batch edges (BATCH_ITEM and BATCH_AGGREGATE) with dashed styling + # These edges connect stuff-to-stuff (not node-to-node) because their source/target + # are controllers rendered as Mermaid subgraphs, not nodes. batch_item_edges = [edge for edge in graph.edges if edge.kind.is_batch_item] batch_aggregate_edges = [edge for edge in graph.edges if edge.kind.is_batch_aggregate] if batch_item_edges or batch_aggregate_edges: lines.append("") lines.append(" %% Batch edges: list-item relationships") + cls._render_dashed_edges(batch_item_edges, lines, stuff_id_mapping, all_stuff_info, show_stuff_codes) + cls._render_dashed_edges(batch_aggregate_edges, lines, stuff_id_mapping, all_stuff_info, show_stuff_codes) - for edge in batch_item_edges: - source_mermaid_id = id_mapping.get(edge.source) - target_mermaid_id = id_mapping.get(edge.target) - if source_mermaid_id and target_mermaid_id: - label = edge.label or "" - lines.append(f' {source_mermaid_id} -."{label}".-> {target_mermaid_id}') - - for edge in batch_aggregate_edges: - source_mermaid_id = id_mapping.get(edge.source) - target_mermaid_id = id_mapping.get(edge.target) - if source_mermaid_id and target_mermaid_id: - label = edge.label or "" - lines.append(f' {source_mermaid_id} -."{label}".-> {target_mermaid_id}') + # Render parallel combine edges (branch outputs → combined output) with dashed styling + # Same approach: use stuff digests to connect stuff-to-stuff. 
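# For two branches feeding one combined output, the Mermaid emitted by the
# helper below would look roughly like this (ids and names invented):
#     stuff_3(["branch_a_result"]):::stuff
#     stuff_7(["combined_result"]):::stuff
#     stuff_3 -.-> stuff_7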
+ parallel_combine_edges = [edge for edge in graph.edges if edge.kind.is_parallel_combine] + if parallel_combine_edges: + lines.append("") + lines.append(" %% Parallel combine edges: branch outputs → combined output") + cls._render_dashed_edges(parallel_combine_edges, lines, stuff_id_mapping, all_stuff_info, show_stuff_codes) # Style definitions lines.append("") @@ -381,6 +407,65 @@ return f'{indent}{stuff_mermaid_id}(["{label}"]):::stuff' + @classmethod + def _render_dashed_edges( + cls, + edges: list[EdgeSpec], + lines: list[str], + stuff_id_mapping: dict[str, str], + all_stuff_info: dict[str, tuple[str, str | None]], + show_stuff_codes: bool, + ) -> None: + """Render dashed edges between stuff nodes, resolving missing stuff nodes on the fly. + + This handles BATCH_ITEM, BATCH_AGGREGATE, and PARALLEL_COMBINE edges which all share + the same rendering logic: look up source/target stuff IDs, render any missing stuff + nodes from all_stuff_info, and emit a dashed arrow with an optional label. + + Args: + edges: The edges to render as dashed arrows. + lines: The mermaid output lines list (mutated). + stuff_id_mapping: Map to store/retrieve stuff mermaid IDs (mutated). + all_stuff_info: Supplementary stuff info from all nodes including controllers. + show_stuff_codes: Whether to show digest in stuff labels. + """ + for edge in edges: + source_sid = stuff_id_mapping.get(edge.source_stuff_digest) if edge.source_stuff_digest else None + target_sid = stuff_id_mapping.get(edge.target_stuff_digest) if edge.target_stuff_digest else None + # Render missing stuff nodes on the fly + if not source_sid and edge.source_stuff_digest and edge.source_stuff_digest in all_stuff_info: + name, concept = all_stuff_info[edge.source_stuff_digest] + lines.append( + cls._render_stuff_node( + digest=edge.source_stuff_digest, + name=name, + concept=concept, + stuff_id_mapping=stuff_id_mapping, + show_stuff_codes=show_stuff_codes, + indent=" ", + ) + ) + source_sid = stuff_id_mapping.get(edge.source_stuff_digest) + if not target_sid and edge.target_stuff_digest and edge.target_stuff_digest in all_stuff_info: + name, concept = all_stuff_info[edge.target_stuff_digest] + lines.append( + cls._render_stuff_node( + digest=edge.target_stuff_digest, + name=name, + concept=concept, + stuff_id_mapping=stuff_id_mapping, + show_stuff_codes=show_stuff_codes, + indent=" ", + ) + ) + target_sid = stuff_id_mapping.get(edge.target_stuff_digest) + if source_sid and target_sid: + label = edge.label or "" + if label: + lines.append(f' {source_sid} -."{label}".-> {target_sid}') + else: + lines.append(f" {source_sid} -.-> {target_sid}") + @classmethod def _render_subgraph_recursive( cls, @@ -395,6 +480,7 @@ subgraph_depths: dict[str, int], show_stuff_codes: bool, rendered_orphan_stuffs: set[str], + controller_output_stuffs: dict[str, dict[str, tuple[str, str | None]]], indent_level: int = 1, depth: int = 0, ) -> list[str]: @@ -403,6 +489,8 @@ This renders both pipe nodes and their produced stuff nodes inside subgraphs. Orphan stuffs (no producer) consumed by leaf nodes are also rendered inside the same subgraph as their consumer, enabling proper placement of batch item stuffs. + Controller output stuffs (e.g., parallel_combine targets) are rendered inside + their controller's subgraph. Args: node_id: The node to render. @@ -416,6 +504,7 @@ subgraph_depths: Map to track subgraph IDs and their depths (mutated). 
show_stuff_codes: Whether to show digest in stuff labels. rendered_orphan_stuffs: Set of orphan stuff digests already rendered (mutated). + controller_output_stuffs: Map of controller node_id to {digest: (name, concept)} for stuffs to render inside. indent_level: Current indentation level. depth: Current depth in the subgraph hierarchy (for coloring). @@ -464,11 +553,26 @@ subgraph_depths=subgraph_depths, show_stuff_codes=show_stuff_codes, rendered_orphan_stuffs=rendered_orphan_stuffs, + controller_output_stuffs=controller_output_stuffs, indent_level=indent_level + 1, depth=depth + 1, ) lines.extend(child_lines) + # Render controller output stuffs (e.g., parallel_combine targets) inside the subgraph + for digest, (name, concept) in sorted(controller_output_stuffs.get(node_id, {}).items(), key=lambda item: item[1][0]): + if digest not in stuff_id_mapping: + stuff_line = cls._render_stuff_node( + digest=digest, + name=name, + concept=concept, + stuff_id_mapping=stuff_id_mapping, + show_stuff_codes=show_stuff_codes, + indent=indent + " ", + ) + lines.append(stuff_line) + rendered_orphan_stuffs.add(digest) + lines.append(f"{indent}end") else: # Leaf node - render as simple node diff --git a/pipelex/graph/reactflow/templates/_scripts.js.jinja2 b/pipelex/graph/reactflow/templates/_scripts.js.jinja2 index 5a8d550a1..ecb59bdc7 100644 --- a/pipelex/graph/reactflow/templates/_scripts.js.jinja2 +++ b/pipelex/graph/reactflow/templates/_scripts.js.jinja2 @@ -435,6 +435,33 @@ function buildDataflowGraph(graphspec, analysis) { // batch_item: batch_controller → stuff_item, batch_aggregate: stuff_item → batch_controller // (showBatchController is declared earlier in the function) + // Create PARALLEL_COMBINE edges from GraphSpec + // These show branch outputs flowing into the combined output + for (const edge of graphspec.edges) { + if (edge.kind !== 'parallel_combine') continue; + + if (!edge.source_stuff_digest || !edge.target_stuff_digest) continue; + const sourceId = `stuff_${edge.source_stuff_digest}`; + const targetId = `stuff_${edge.target_stuff_digest}`; + + edges.push({ + id: edge.id, + source: sourceId, + target: targetId, + type: {{ edge_type | tojson }}, + animated: false, + style: { + stroke: 'var(--color-parallel-combine)', + strokeWidth: 2, + strokeDasharray: '5,5', + }, + markerEnd: { + type: MarkerType?.ArrowClosed || 'arrowclosed', + color: 'var(--color-parallel-combine)', + }, + }); + } + for (const edge of graphspec.edges) { if (edge.kind !== 'batch_item' && edge.kind !== 'batch_aggregate') { continue; diff --git a/pipelex/graph/reactflow/templates/_styles.css.jinja2 b/pipelex/graph/reactflow/templates/_styles.css.jinja2 index f75fddf7e..8944aa678 100644 --- a/pipelex/graph/reactflow/templates/_styles.css.jinja2 +++ b/pipelex/graph/reactflow/templates/_styles.css.jinja2 @@ -27,6 +27,7 @@ --color-edge: #3b82f6; --color-batch-item: #a855f7; --color-batch-aggregate: #22c55e; + --color-parallel-combine: #c084fc; --font-sans: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif; --font-mono: 'JetBrains Mono', 'Monaco', 'Menlo', monospace; --radius-sm: 4px; @@ -66,6 +67,7 @@ --color-edge: #3b82f6; --color-batch-item: #9333ea; --color-batch-aggregate: #16a34a; + --color-parallel-combine: #a855f7; --shadow-sm: 0 1px 2px rgba(0, 0, 0, 0.05); --shadow-md: 0 4px 12px rgba(0, 0, 0, 0.1); --shadow-lg: 0 8px 24px rgba(0, 0, 0, 0.15); @@ -100,6 +102,7 @@ --color-edge: #3b82f6; --color-batch-item: #9333ea; --color-batch-aggregate: #16a34a; + 
--color-parallel-combine: #a855f7; --shadow-sm: 0 1px 2px rgba(0, 0, 0, 0.05); --shadow-md: 0 4px 12px rgba(0, 0, 0, 0.1); --shadow-lg: 0 8px 24px rgba(0, 0, 0, 0.15); @@ -111,7 +114,7 @@ /* Dracula palette - vibrant dark theme with high contrast */ [data-palette="dracula"] { - /* Pipes / Execution Units - Salmon red (matches plx syntax highlighting) */ + /* Pipes / Execution Units - Salmon red (matches MTHDS syntax highlighting) */ --color-pipe: #ff6b6b; --color-pipe-bg: rgba(224, 108, 117, 0.18); --color-pipe-text: #ffffff; @@ -127,6 +130,7 @@ --color-edge: #FFFACD; --color-batch-item: #bd93f9; --color-batch-aggregate: #50fa7b; + --color-parallel-combine: #d6a4ff; /* Status colors */ --color-success: #50FA7B; /* Bright Green */ diff --git a/pipelex/graph/reactflow/viewspec_transformer.py b/pipelex/graph/reactflow/viewspec_transformer.py index 0af3ba5ad..e391dffbc 100644 --- a/pipelex/graph/reactflow/viewspec_transformer.py +++ b/pipelex/graph/reactflow/viewspec_transformer.py @@ -63,6 +63,8 @@ def _map_edge_kind_to_view_type(kind: EdgeKind) -> str: return "batch_item" case EdgeKind.BATCH_AGGREGATE: return "batch_aggregate" + case EdgeKind.PARALLEL_COMBINE: + return "parallel_combine" def _build_node_label(node_spec: Any) -> str: diff --git a/pipelex/hub.py b/pipelex/hub.py index 5349111b1..7ada0b9c0 100644 --- a/pipelex/hub.py +++ b/pipelex/hub.py @@ -523,7 +523,7 @@ def get_pipe_source(pipe_code: str) -> Path | None: pipe_code: The pipe code to look up. Returns: - Path to the .plx file the pipe was loaded from, or None if unknown. + Path to the .mthds file the pipe was loaded from, or None if unknown. """ return get_pipelex_hub().get_library_manager().get_pipe_source(pipe_code=pipe_code) diff --git a/pipelex/kit/agent_rules/codex_commands.md b/pipelex/kit/agent_rules/codex_commands.md index 3d3ef7fec..929839823 100644 --- a/pipelex/kit/agent_rules/codex_commands.md +++ b/pipelex/kit/agent_rules/codex_commands.md @@ -14,6 +14,7 @@ - Pyright: Static type checking - Ruff: Fix unused imports, lint, format - Mypy: Static type checker + - plxt: Format and lint TOML, MTHDS, and PLX files Always fix any issues reported by these tools before proceeding. @@ -50,6 +51,22 @@ For standard installations, the virtual environment is named `.venv`. Always check this first. On Windows, the path is `.venv\Scripts\` instead of `.venv/bin/`. +## Pipelex Dev CLI (`pipelex-dev`) + + The `pipelex-dev` CLI provides internal development tools that are not distributed with the package. It is available in the virtual environment. + + ```bash + .venv/bin/pipelex-dev --help + ``` + + Key commands: + + - **`generate-mthds-schema`**: Regenerate the MTHDS JSON Schema (`pipelex/language/mthds_schema.json`). Run this after modifying `mthds_schema_generator.py`. + + ```bash + .venv/bin/pipelex-dev generate-mthds-schema + ``` + ## Pipelex CLI Commands To run the Pipelex CLI commands without the logo, you can use the `--no-logo` flag, this will avoid useless tokens in the console output. diff --git a/pipelex/kit/agent_rules/commands.md b/pipelex/kit/agent_rules/commands.md index 8a7b78d0f..b1f04a8c6 100644 --- a/pipelex/kit/agent_rules/commands.md +++ b/pipelex/kit/agent_rules/commands.md @@ -14,6 +14,7 @@ - Pyright: Static type checking - Ruff: Fix unused imports, lint, format - Mypy: Static type checker + - plxt: Format and lint TOML, MTHDS, and PLX files Always fix any issues reported by these tools before proceeding. 
@@ -37,6 +38,8 @@ ## Running Tests with Prints + > **LOCAL ONLY**: The commands below are meant for a human developer running on their local machine. If you are an AI agent (Claude Code, Cursor, Codex, or any other agent running in the cloud or in a sandboxed environment), **do NOT use these commands**. Use `make agent-test` instead. + If anything went wrong, you can run the tests with prints to see the error: ```bash @@ -46,6 +49,8 @@ ## Running specific Tests + > **LOCAL ONLY**: The commands below are meant for a human developer running on their local machine. If you are an AI agent (Claude Code, Cursor, Codex, or any other agent running in the cloud or in a sandboxed environment), **do NOT use these commands**. Use `make agent-test` instead. + ```bash make tp TEST=TestClassName # or @@ -55,6 +60,8 @@ ## Running Last Failed Tests + > **LOCAL ONLY**: The commands below are meant for a human developer running on their local machine. If you are an AI agent (Claude Code, Cursor, Codex, or any other agent running in the cloud or in a sandboxed environment), **do NOT use these commands**. Use `make agent-test` instead. + To rerun only the tests that failed in the previous run, use: ```bash @@ -79,3 +86,19 @@ ``` For standard installations, the virtual environment is named `.venv`. Always check this first. On Windows, the path is `.venv\Scripts\` instead of `.venv/bin/`. + +## Pipelex Dev CLI (`pipelex-dev`) + + The `pipelex-dev` CLI provides internal development tools that are not distributed with the package. It is available in the virtual environment. + + ```bash + .venv/bin/pipelex-dev --help + ``` + + Key commands: + + - **`generate-mthds-schema`**: Regenerate the MTHDS JSON Schema (`pipelex/language/mthds_schema.json`). Run this after modifying `mthds_schema_generator.py`. + + ```bash + .venv/bin/pipelex-dev generate-mthds-schema + ``` diff --git a/pipelex/kit/agent_rules/pytest_standards.md b/pipelex/kit/agent_rules/pytest_standards.md index a37ff9f44..e7c6a1b41 100644 --- a/pipelex/kit/agent_rules/pytest_standards.md +++ b/pipelex/kit/agent_rules/pytest_standards.md @@ -11,7 +11,7 @@ NEVER EVER put more than one TestClass into a test module. - Place test files in the appropriate test category directory: - `tests/unit/` - for unit tests that test individual functions/classes in isolation - `tests/integration/` - for integration tests that test component interactions - - `tests/e2e/` - for end-to-end tests that test complete workflows + - `tests/e2e/` - for end-to-end tests that test complete methods - Do NOT add `__init__.py` files to test directories. Test directories do not need to be Python packages. - Fixtures are defined in conftest.py modules at different levels of the hierarchy, their scope is handled by pytest - Test data is placed inside test_data.py at different levels of the hierarchy, they must be imported with package paths from the root like `from tests.integration.pipelex.cogt.test_data`. Their content is all constants, regrouped inside classes to keep things tidy. 
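As a sketch of the test-data convention described above (the path and class are hypothetical, following the stated import rule):

```python
# tests/unit/pipelex/core/test_data.py
class QualifiedRefTestCases:
    VALID_CONCEPT_REF = "legal.contracts.NonCompeteClause"
    VALID_PIPE_REF = "scoring.compute_score"
    INVALID_REFS = (".leading.dot", "trailing.dot.", "double..dot")
```

A test module would then import it with a package path from the root, e.g. `from tests.unit.pipelex.core.test_data import QualifiedRefTestCases`.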
diff --git a/pipelex/kit/configs/pipelex.toml b/pipelex/kit/configs/pipelex.toml index ed8859fcf..12516db51 100644 --- a/pipelex/kit/configs/pipelex.toml +++ b/pipelex/kit/configs/pipelex.toml @@ -101,19 +101,19 @@ signed_urls_lifespan_seconds = 3600 # Set to "disabled [pipelex.scan_config] # Directories to exclude when scanning for pipeline files excluded_dirs = [ - ".venv", - "venv", - "env", - ".env", - "virtualenv", - ".virtualenv", - ".git", - "__pycache__", - ".pytest_cache", - ".mypy_cache", - ".ruff_cache", - "node_modules", - "results", + ".venv", + "venv", + "env", + ".env", + "virtualenv", + ".virtualenv", + ".git", + "__pycache__", + ".pytest_cache", + ".mypy_cache", + ".ruff_cache", + "node_modules", + "results", ] #################################################################################################### @@ -189,4 +189,3 @@ is_dump_response_text_enabled = false is_dump_kwargs_enabled = false is_dump_response_enabled = false is_dump_error_enabled = false - diff --git a/pipelex/kit/configs/plxt.toml b/pipelex/kit/configs/plxt.toml new file mode 100644 index 000000000..c0a68b01f --- /dev/null +++ b/pipelex/kit/configs/plxt.toml @@ -0,0 +1,123 @@ +# ============================================================================= +# Pipelex TOML Configuration for pipelex-demo +# ============================================================================= +# Configures TOML/MTHDS formatting and linting behaviour for this project. +# Powered by the Pipelex extension (plxt / taplo engine). +# +# Docs: https://taplo.tamasfe.dev/configuration/ +# ============================================================================= + +# --------------------------------------------------------------------------- +# File discovery +# --------------------------------------------------------------------------- + +# Glob patterns for files to process. +include = ["**/*.toml", "**/*.mthds", "**/*.plx"] + +exclude = [ + ".venv/**", + ".mypy_cache/**", + ".ruff_cache/**", + ".pytest_cache/**", + "__pycache__/**", + "target/**", + "node_modules/**", + ".git/**", + "*.lock", +] # Glob patterns for files to ignore. +# These are evaluated relative to the config file location. + +# ============================================================================= +# Global formatting defaults +# ============================================================================= +# These apply to every file matched by `include` unless overridden by a +# [[rule]].formatting section below. Every option is shown at its built-in +# default so you can tune any of them in one place. 
+ +[formatting] +align_entries = false # line up "=" signs across consecutive entries +align_comments = true # align end-of-line comments on consecutive lines +align_single_comments = true # also align lone comments (requires align_comments) +array_trailing_comma = true +array_auto_expand = true # go multiline when array exceeds column_width +array_auto_collapse = false # don't re-collapse multiline arrays that fit +inline_table_expand = true # expand inline tables exceeding column_width +compact_arrays = true # [1, 2] not [ 1, 2 ] +compact_inline_tables = false # keep spaces inside braces: { a = 1 } +compact_entries = false # keep spaces around "=": key = value +column_width = 80 +indent_tables = false +indent_entries = false +indent_string = " " +trailing_newline = true +reorder_keys = false +reorder_arrays = false +reorder_inline_tables = false +allowed_blank_lines = 2 +crlf = false + +# ============================================================================= +# Per-file-type rules +# ============================================================================= +# Each [[rule]] can narrow its scope with `include` / `exclude` globs and +# provide its own [rule.formatting] overrides. Options not listed here fall +# back to the global [formatting] section above. + + +# --------------------------------------------------------------------------- +# Rule: TOML files +# --------------------------------------------------------------------------- +[[rule]] +# Which files this rule applies to (relative globs). +include = ["**/*.toml"] + +# Per-rule formatting overrides — all at defaults so you can tweak them +# independently of .mthds files. +[rule.formatting] +# align_entries = false +# align_comments = true +# align_single_comments = true +# array_trailing_comma = true +# array_auto_expand = true +# array_auto_collapse = true +# inline_table_expand = true +# compact_arrays = true +# compact_inline_tables = false +# compact_entries = false +# column_width = 80 +# indent_tables = false +# indent_entries = false +# indent_string = " " +# trailing_newline = true +# allowed_blank_lines = 2 + + +# --------------------------------------------------------------------------- +# Rule: MTHDS files (Pipelex pipeline definitions) +# --------------------------------------------------------------------------- +[[rule]] +# Which files this rule applies to (relative globs). +include = ["**/*.mthds", "**/*.plx"] + +[rule.schema] +path = "pipelex/language/mthds_schema.json" + +# Per-rule formatting overrides — all at defaults so you can tweak them +# independently of .toml files. 
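# As an illustration (hypothetical pipe fields), align_entries = true below
# makes consecutive entries line up their "=" signs in .mthds files:
#
#     type        = "PipeLLM"
#     description = "Summarize the contract"
#
# while plain .toml files keep the unaligned global default (align_entries = false).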
+[rule.formatting] +align_entries = true +# align_comments = true +# align_single_comments = true +# array_trailing_comma = true +# array_auto_expand = true +# array_auto_collapse = true +# inline_table_expand = true +# compact_arrays = true +# compact_inline_tables = false +# compact_entries = false +# column_width = 80 +# indent_tables = false +# indent_entries = false +# indent_string = " " +# trailing_newline = true +# allowed_blank_lines = 2 diff --git a/pipelex/kit/configs/telemetry.toml b/pipelex/kit/configs/telemetry.toml index eb2c5374d..ed9a90901 100644 --- a/pipelex/kit/configs/telemetry.toml +++ b/pipelex/kit/configs/telemetry.toml @@ -29,11 +29,11 @@ api_key = "${POSTHOG_API_KEY}" # Get from PostHog Project Settings geoip = true # Enable GeoIP lookup debug = false # Enable PostHog debug mode redact_properties = [ - "prompt", - "system_prompt", - "response", - "file_path", - "url", + "prompt", + "system_prompt", + "response", + "file_path", + "url", ] # Event properties to redact # AI span tracing to YOUR PostHog (does NOT affect Langfuse/OTLP - they receive full data) diff --git a/pipelex/kit/index.toml b/pipelex/kit/index.toml index abf566e10..64673ed43 100644 --- a/pipelex/kit/index.toml +++ b/pipelex/kit/index.toml @@ -8,12 +8,12 @@ default_set = "all" [agent_rules.sets] all = [ - "commands.md", - "python_standards.md", - "pipelex_standards.md", - "pytest_standards.md", - "docs.md", - "tdd.md", + "commands.md", + "python_standards.md", + "pipelex_standards.md", + "pytest_standards.md", + "docs.md", + "tdd.md", ] [agent_rules.cursor.front_matter] @@ -48,12 +48,12 @@ path = "AGENTS.md" [agent_rules.targets.agents.sets] all = [ - "codex_commands.md", - "python_standards.md", - "pipelex_standards.md", - "pytest_standards.md", - "docs.md", - "tdd.md", + "codex_commands.md", + "python_standards.md", + "pipelex_standards.md", + "pytest_standards.md", + "docs.md", + "tdd.md", ] [agent_rules.targets.claude] diff --git a/pipelex/language/mthds_config.py b/pipelex/language/mthds_config.py new file mode 100644 index 000000000..353009ad8 --- /dev/null +++ b/pipelex/language/mthds_config.py @@ -0,0 +1,28 @@ +from pipelex.system.configuration.config_model import ConfigModel + + +class MthdsConfigStrings(ConfigModel): + prefer_literal: bool + force_multiline: bool + length_limit_to_multiline: int + ensure_trailing_newline: bool + ensure_leading_blank_line: bool + + +class MthdsConfigInlineTables(ConfigModel): + spaces_inside_curly_braces: bool + + +class MthdsConfigForConcepts(ConfigModel): + structure_field_ordering: list[str] + + +class MthdsConfigForPipes(ConfigModel): + field_ordering: list[str] + + +class MthdsConfig(ConfigModel): + strings: MthdsConfigStrings + inline_tables: MthdsConfigInlineTables + concepts: MthdsConfigForConcepts + pipes: MthdsConfigForPipes diff --git a/pipelex/language/plx_factory.py b/pipelex/language/mthds_factory.py similarity index 95% rename from pipelex/language/plx_factory.py rename to pipelex/language/mthds_factory.py index ecc480091..236a12e68 100644 --- a/pipelex/language/plx_factory.py +++ b/pipelex/language/mthds_factory.py @@ -13,7 +13,7 @@ if TYPE_CHECKING: from pipelex.core.bundles.pipelex_bundle_blueprint import PipelexBundleBlueprint - from pipelex.system.configuration.configs import PlxConfig + from pipelex.system.configuration.configs import MthdsConfig class SectionKey(StrEnum): @@ -27,10 +27,10 @@ class SectionKey(StrEnum): PIPE_CATEGORY_FIELD_KEY = "pipe_category" -class PlxFactory: +class MthdsFactory: @classmethod - def _plx_config(cls) 
-> PlxConfig: - return get_config().pipelex.plx_config + def _mthds_config(cls) -> MthdsConfig: + return get_config().pipelex.mthds_config @classmethod def format_tomlkit_string(cls, text: str) -> Any: # Can't type this because of tomlkit @@ -39,7 +39,7 @@ def format_tomlkit_string(cls, text: str) -> Any: # Can't type this because of - When multiline, `ensure_trailing_newline` puts the closing quotes on their own line. - When multiline, `ensure_leading_blank_line` inserts a real blank line at the start of the string. """ - strings_config = cls._plx_config().strings + strings_config = cls._mthds_config().strings needs_multiline = strings_config.force_multiline or ("\n" in text) or len(text) > strings_config.length_limit_to_multiline normalized = text @@ -144,7 +144,7 @@ def convert_mapping_to_table( else: # No field ordering provided, use original logic for field_key, field_value in mapping.items(): - # Skip the category field as it's not needed in PLX output (pipe metadata) + # Skip the category field as it's not needed in MTHDS output (pipe metadata) if field_key == PIPE_CATEGORY_FIELD_KEY: continue @@ -241,9 +241,9 @@ def make_template_table(cls, template_value: Mapping[str, Any]) -> Any: @classmethod def make_construct_table(cls, construct_value: Mapping[str, Any]) -> Any: - """Create a nested table for construct section in PLX format. + """Create a nested table for construct section in MTHDS format. - The construct_value should already be in PLX format (from ConstructBlueprint.to_plx_dict()) + The construct_value should already be in MTHDS format (from ConstructBlueprint.to_mthds_dict()) with field names at the root, not wrapped in a 'fields' key. """ tbl = table() @@ -265,7 +265,7 @@ def make_table_obj_for_pipe(cls, section_value: Mapping[str, Any]) -> Any: log.verbose(f"Field is a mapping: key = {field_key}, value = {field_value}") field_value = cast("Mapping[str, Any]", field_value) # Convert pipe configuration to table (handles template field specially) - table_obj.add(field_key, cls.convert_mapping_to_table(field_value, field_ordering=cls._plx_config().pipes.field_ordering)) + table_obj.add(field_key, cls.convert_mapping_to_table(field_value, field_ordering=cls._mthds_config().pipes.field_ordering)) return table_obj @classmethod @@ -314,7 +314,7 @@ def make_table_obj_for_concept(cls, section_value: Mapping[str, Any]) -> Any: structure_table_obj.add( structure_field_key, cls.convert_dicts_to_inline_tables( - value=filtered_value, field_ordering=cls._plx_config().concepts.structure_field_ordering + value=filtered_value, field_ordering=cls._mthds_config().concepts.structure_field_ordering ), ) concept_table_obj.add("structure", structure_table_obj) @@ -326,7 +326,7 @@ def make_table_obj_for_concept(cls, section_value: Mapping[str, Any]) -> Any: return table_obj @classmethod - def dict_to_plx_styled_toml(cls, data: Mapping[str, Any]) -> str: + def dict_to_mthds_styled_toml(cls, data: Mapping[str, Any]) -> str: """Top-level keys become tables; second-level mappings become tables; inline tables start at third level.""" log.verbose("=" * 100) data = remove_none_values_from_dict(data=data) @@ -355,16 +355,16 @@ def dict_to_plx_styled_toml(cls, data: Mapping[str, Any]) -> str: document_root.add(section_key, table_obj_for_concept) toml_output = tomlkit.dumps(document_root) # pyright: ignore[reportUnknownMemberType] - if cls._plx_config().inline_tables.spaces_inside_curly_braces: + if cls._mthds_config().inline_tables.spaces_inside_curly_braces: return 
cls.add_spaces_to_inline_tables(toml_output) return toml_output @classmethod - def make_plx_content(cls, blueprint: PipelexBundleBlueprint) -> str: - # Use context to signal PLX format serialization to ConstructBlueprint + def make_mthds_content(cls, blueprint: PipelexBundleBlueprint) -> str: + # Use context to signal MTHDS format serialization to ConstructBlueprint blueprint_dict = blueprint.model_dump( serialize_as_any=True, by_alias=True, - context={"format": "plx"}, + context={"format": "mthds"}, ) - return cls.dict_to_plx_styled_toml(data=blueprint_dict) + return cls.dict_to_mthds_styled_toml(data=blueprint_dict) diff --git a/pipelex/language/mthds_schema.json b/pipelex/language/mthds_schema.json new file mode 100644 index 000000000..3e1728f5a --- /dev/null +++ b/pipelex/language/mthds_schema.json @@ -0,0 +1,1707 @@ +{ + "additionalProperties": false, + "properties": { + "domain": { + "title": "Domain", + "type": "string" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Description" + }, + "system_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "System Prompt" + }, + "main_pipe": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Main Pipe" + }, + "concept": { + "anyOf": [ + { + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/definitions/ConceptBlueprint" + }, + { + "type": "string" + } + ] + }, + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Concept" + }, + "pipe": { + "anyOf": [ + { + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/PipeFuncBlueprint" + }, + { + "$ref": "#/definitions/PipeImgGenBlueprint" + }, + { + "$ref": "#/definitions/PipeComposeBlueprint" + }, + { + "$ref": "#/definitions/PipeLLMBlueprint" + }, + { + "$ref": "#/definitions/PipeExtractBlueprint" + }, + { + "$ref": "#/definitions/PipeBatchBlueprint" + }, + { + "$ref": "#/definitions/PipeConditionBlueprint" + }, + { + "$ref": "#/definitions/PipeParallelBlueprint" + }, + { + "$ref": "#/definitions/PipeSequenceBlueprint" + } + ] + }, + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Pipe" + } + }, + "required": [ + "domain" + ], + "title": "MTHDS File Schema", + "type": "object", + "definitions": { + "AspectRatio": { + "enum": [ + "square", + "landscape_4_3", + "landscape_3_2", + "landscape_16_9", + "landscape_21_9", + "portrait_3_4", + "portrait_2_3", + "portrait_9_16", + "portrait_9_21" + ], + "title": "AspectRatio", + "type": "string" + }, + "Background": { + "enum": [ + "transparent", + "opaque", + "auto" + ], + "title": "Background", + "type": "string" + }, + "ConceptBlueprint": { + "additionalProperties": false, + "properties": { + "description": { + "title": "Description", + "type": "string" + }, + "structure": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": { + "anyOf": [ + { + "type": "string" + }, + { + "$ref": "#/definitions/ConceptStructureBlueprint" + } + ] + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Structure" + }, + "refines": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Refines" + } + }, + "required": [ + "description" + ], + "title": "ConceptBlueprint", + "type": "object" + }, + "ConceptStructureBlueprint": { + "properties": { + "description": { + "title": "Description", + "type": "string" + }, + "type": { + "anyOf": [ + 
{ + "$ref": "#/definitions/ConceptStructureBlueprintFieldType" + }, + { + "type": "null" + } + ], + "default": null + }, + "key_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Key Type" + }, + "value_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Value Type" + }, + "item_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Item Type" + }, + "concept_ref": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Concept Ref" + }, + "item_concept_ref": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Item Concept Ref" + }, + "choices": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Choices" + }, + "default_value": { + "anyOf": [ + {}, + { + "type": "null" + } + ], + "default": null, + "title": "Default Value" + }, + "required": { + "default": false, + "title": "Required", + "type": "boolean" + } + }, + "required": [ + "description" + ], + "title": "ConceptStructureBlueprint", + "type": "object" + }, + "ConceptStructureBlueprintFieldType": { + "enum": [ + "text", + "list", + "dict", + "integer", + "boolean", + "number", + "date", + "concept" + ], + "title": "ConceptStructureBlueprintFieldType", + "type": "string" + }, + "ConstructBlueprint": { + "title": "ConstructBlueprint", + "description": "Construct section defining how to compose a StructuredContent from working memory fields.", + "type": "object", + "additionalProperties": { + "anyOf": [ + { + "type": "string", + "description": "Fixed string value" + }, + { + "type": "number", + "description": "Fixed numeric value" + }, + { + "type": "boolean", + "description": "Fixed boolean value" + }, + { + "type": "array", + "description": "Fixed array value" + }, + { + "type": "object", + "description": "Variable reference from working memory", + "properties": { + "from": { + "type": "string", + "description": "Path to variable in working memory" + }, + "list_to_dict_keyed_by": { + "type": "string", + "description": "Convert list to dict keyed by this attribute" + } + }, + "required": [ + "from" + ], + "additionalProperties": false + }, + { + "type": "object", + "description": "Jinja2 template string", + "properties": { + "template": { + "type": "string", + "description": "Jinja2 template string (with $ preprocessing)" + } + }, + "required": [ + "template" + ], + "additionalProperties": false + }, + { + "type": "object", + "description": "Nested construct", + "additionalProperties": { + "$ref": "#/definitions/ConstructFieldBlueprint" + }, + "minProperties": 1 + } + ] + }, + "minProperties": 1 + }, + "ConstructFieldBlueprint": { + "title": "ConstructFieldBlueprint", + "anyOf": [ + { + "type": "string", + "description": "Fixed string value" + }, + { + "type": "number", + "description": "Fixed numeric value" + }, + { + "type": "boolean", + "description": "Fixed boolean value" + }, + { + "type": "array", + "description": "Fixed array value" + }, + { + "type": "object", + "description": "Variable reference from working memory", + "properties": { + "from": { + "type": "string", + "description": "Path to variable in working memory" + }, + "list_to_dict_keyed_by": { + "type": "string", + "description": "Convert list to dict keyed by this attribute" + } + }, + "required": [ + "from" + 
], + "additionalProperties": false + }, + { + "type": "object", + "description": "Jinja2 template string", + "properties": { + "template": { + "type": "string", + "description": "Jinja2 template string (with $ preprocessing)" + } + }, + "required": [ + "template" + ], + "additionalProperties": false + }, + { + "type": "object", + "description": "Nested construct", + "additionalProperties": { + "$ref": "#/definitions/ConstructFieldBlueprint" + }, + "minProperties": 1 + } + ] + }, + "ConstructFieldMethod": { + "description": "Method used to compose a field value.", + "enum": [ + "fixed", + "from_var", + "template", + "nested" + ], + "title": "ConstructFieldMethod", + "type": "string" + }, + "ExtractSetting": { + "additionalProperties": false, + "properties": { + "model": { + "title": "Model", + "type": "string" + }, + "max_nb_images": { + "anyOf": [ + { + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Max Nb Images" + }, + "image_min_size": { + "anyOf": [ + { + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Image Min Size" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Description" + } + }, + "required": [ + "model" + ], + "title": "ExtractSetting", + "type": "object" + }, + "ImageFormat": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "ImageFormat", + "type": "string" + }, + "ImgGenSetting": { + "additionalProperties": false, + "properties": { + "model": { + "title": "Model", + "type": "string" + }, + "quality": { + "anyOf": [ + { + "$ref": "#/definitions/Quality" + }, + { + "type": "null" + } + ], + "default": null + }, + "nb_steps": { + "anyOf": [ + { + "exclusiveMinimum": true, + "type": "integer", + "minimum": 0 + }, + { + "type": "null" + } + ], + "default": null, + "title": "Nb Steps" + }, + "guidance_scale": { + "anyOf": [ + { + "exclusiveMinimum": true, + "type": "number", + "minimum": 0 + }, + { + "type": "null" + } + ], + "default": null, + "title": "Guidance Scale" + }, + "is_moderated": { + "default": false, + "title": "Is Moderated", + "type": "boolean" + }, + "safety_tolerance": { + "anyOf": [ + { + "maximum": 6, + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Safety Tolerance" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Description" + } + }, + "required": [ + "model" + ], + "title": "ImgGenSetting", + "type": "object" + }, + "LLMSetting": { + "additionalProperties": false, + "properties": { + "model": { + "title": "Model", + "type": "string" + }, + "temperature": { + "maximum": 1, + "minimum": 0, + "title": "Temperature", + "type": "number" + }, + "max_tokens": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Max Tokens" + }, + "image_detail": { + "anyOf": [ + { + "$ref": "#/definitions/PromptImageDetail" + }, + { + "type": "null" + } + ], + "default": null + }, + "prompting_target": { + "anyOf": [ + { + "$ref": "#/definitions/PromptingTarget" + }, + { + "type": "null" + } + ], + "default": null + }, + "reasoning_effort": { + "anyOf": [ + { + "$ref": "#/definitions/ReasoningEffort" + }, + { + "type": "null" + } + ], + "default": null + }, + "reasoning_budget": { + "anyOf": [ + { + "exclusiveMinimum": true, + "type": "integer", + "minimum": 0 + }, + { + "type": "null" + } + ], + "default": null, 
+ "title": "Reasoning Budget" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Description" + } + }, + "required": [ + "model", + "temperature" + ], + "title": "LLMSetting", + "type": "object" + }, + "ModelReference": { + "description": "A parsed model reference with explicit kind and name.\n\nArgs:\n kind: The type of reference (preset, alias, waterfall, or handle)\n name: The actual name of the model/preset/alias/waterfall (without prefix)\n raw: The original input string (for error messages)", + "properties": { + "kind": { + "$ref": "#/definitions/ModelReferenceKind" + }, + "name": { + "title": "Name", + "type": "string" + }, + "raw": { + "title": "Raw", + "type": "string" + } + }, + "required": [ + "kind", + "name", + "raw" + ], + "title": "ModelReference", + "type": "object" + }, + "ModelReferenceKind": { + "description": "The kind of model reference.", + "enum": [ + "preset", + "alias", + "waterfall", + "handle" + ], + "title": "ModelReferenceKind", + "type": "string" + }, + "PipeBatchBlueprint": { + "additionalProperties": false, + "properties": { + "type": { + "default": "PipeBatch", + "title": "Type", + "type": "string", + "enum": [ + "PipeBatch" + ] + }, + "description": { + "title": "Description", + "type": "string" + }, + "inputs": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Inputs" + }, + "output": { + "title": "Output", + "type": "string" + }, + "branch_pipe_code": { + "title": "Branch Pipe Code", + "type": "string" + }, + "input_list_name": { + "title": "Input List Name", + "type": "string" + }, + "input_item_name": { + "title": "Input Item Name", + "type": "string" + } + }, + "required": [ + "description", + "output", + "branch_pipe_code", + "input_list_name", + "input_item_name" + ], + "title": "PipeBatchBlueprint", + "type": "object" + }, + "PipeComposeBlueprint": { + "additionalProperties": false, + "properties": { + "type": { + "default": "PipeCompose", + "title": "Type", + "type": "string", + "enum": [ + "PipeCompose" + ] + }, + "description": { + "title": "Description", + "type": "string" + }, + "inputs": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Inputs" + }, + "output": { + "title": "Output", + "type": "string" + }, + "template": { + "anyOf": [ + { + "type": "string" + }, + { + "$ref": "#/definitions/TemplateBlueprint" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Template" + }, + "construct": { + "anyOf": [ + { + "$ref": "#/definitions/ConstructBlueprint" + }, + { + "type": "null" + } + ], + "default": null + } + }, + "required": [ + "description", + "output" + ], + "title": "PipeComposeBlueprint", + "type": "object" + }, + "PipeConditionBlueprint": { + "additionalProperties": false, + "properties": { + "type": { + "default": "PipeCondition", + "title": "Type", + "type": "string", + "enum": [ + "PipeCondition" + ] + }, + "description": { + "title": "Description", + "type": "string" + }, + "inputs": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Inputs" + }, + "output": { + "title": "Output", + "type": "string" + }, + "expression_template": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": 
"Expression Template" + }, + "expression": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Expression" + }, + "outcomes": { + "additionalProperties": { + "type": "string" + }, + "title": "Outcomes", + "type": "object" + }, + "default_outcome": { + "anyOf": [ + { + "type": "string" + }, + { + "$ref": "#/definitions/SpecialOutcome" + } + ], + "title": "Default Outcome" + }, + "add_alias_from_expression_to": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Add Alias From Expression To" + } + }, + "required": [ + "description", + "output", + "default_outcome" + ], + "title": "PipeConditionBlueprint", + "type": "object" + }, + "PipeExtractBlueprint": { + "additionalProperties": false, + "properties": { + "type": { + "default": "PipeExtract", + "title": "Type", + "type": "string", + "enum": [ + "PipeExtract" + ] + }, + "description": { + "title": "Description", + "type": "string" + }, + "inputs": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Inputs" + }, + "output": { + "title": "Output", + "type": "string" + }, + "model": { + "anyOf": [ + { + "$ref": "#/definitions/ExtractSetting" + }, + { + "type": "string" + }, + { + "$ref": "#/definitions/ModelReference" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Model" + }, + "max_page_images": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Max Page Images" + }, + "page_image_captions": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Page Image Captions" + }, + "page_views": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Page Views" + }, + "page_views_dpi": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Page Views Dpi" + } + }, + "required": [ + "description", + "output" + ], + "title": "PipeExtractBlueprint", + "type": "object" + }, + "PipeFuncBlueprint": { + "additionalProperties": false, + "properties": { + "type": { + "default": "PipeFunc", + "title": "Type", + "type": "string", + "enum": [ + "PipeFunc" + ] + }, + "description": { + "title": "Description", + "type": "string" + }, + "inputs": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Inputs" + }, + "output": { + "title": "Output", + "type": "string" + }, + "function_name": { + "description": "The name of the function to call.", + "title": "Function Name", + "type": "string" + } + }, + "required": [ + "description", + "output", + "function_name" + ], + "title": "PipeFuncBlueprint", + "type": "object" + }, + "PipeImgGenBlueprint": { + "additionalProperties": false, + "properties": { + "type": { + "default": "PipeImgGen", + "title": "Type", + "type": "string", + "enum": [ + "PipeImgGen" + ] + }, + "description": { + "title": "Description", + "type": "string" + }, + "inputs": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Inputs" + }, + "output": { + "title": "Output", + "type": "string" + }, + "prompt": { + "title": "Prompt", + "type": "string" + }, + "negative_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + 
"type": "null" + } + ], + "default": null, + "title": "Negative Prompt" + }, + "model": { + "anyOf": [ + { + "$ref": "#/definitions/ImgGenSetting" + }, + { + "type": "string" + }, + { + "$ref": "#/definitions/ModelReference" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Model" + }, + "aspect_ratio": { + "anyOf": [ + { + "$ref": "#/definitions/AspectRatio" + }, + { + "type": "null" + } + ], + "default": null + }, + "is_raw": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Is Raw" + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string", + "enum": [ + "auto" + ] + }, + { + "type": "null" + } + ], + "default": null, + "title": "Seed" + }, + "background": { + "anyOf": [ + { + "$ref": "#/definitions/Background" + }, + { + "type": "null" + } + ], + "default": null + }, + "output_format": { + "anyOf": [ + { + "$ref": "#/definitions/ImageFormat" + }, + { + "type": "null" + } + ], + "default": null + } + }, + "required": [ + "description", + "output", + "prompt" + ], + "title": "PipeImgGenBlueprint", + "type": "object" + }, + "PipeLLMBlueprint": { + "additionalProperties": false, + "properties": { + "type": { + "default": "PipeLLM", + "title": "Type", + "type": "string", + "enum": [ + "PipeLLM" + ] + }, + "description": { + "title": "Description", + "type": "string" + }, + "inputs": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Inputs" + }, + "output": { + "title": "Output", + "type": "string" + }, + "model": { + "anyOf": [ + { + "$ref": "#/definitions/LLMSetting" + }, + { + "type": "string" + }, + { + "$ref": "#/definitions/ModelReference" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Model" + }, + "model_to_structure": { + "anyOf": [ + { + "$ref": "#/definitions/LLMSetting" + }, + { + "type": "string" + }, + { + "$ref": "#/definitions/ModelReference" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Model To Structure" + }, + "system_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "System Prompt" + }, + "prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Prompt" + }, + "structuring_method": { + "anyOf": [ + { + "$ref": "#/definitions/StructuringMethod" + }, + { + "type": "null" + } + ], + "default": null + } + }, + "required": [ + "description", + "output" + ], + "title": "PipeLLMBlueprint", + "type": "object" + }, + "PipeParallelBlueprint": { + "additionalProperties": false, + "properties": { + "type": { + "default": "PipeParallel", + "title": "Type", + "type": "string", + "enum": [ + "PipeParallel" + ] + }, + "description": { + "title": "Description", + "type": "string" + }, + "inputs": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Inputs" + }, + "output": { + "title": "Output", + "type": "string" + }, + "branches": { + "items": { + "$ref": "#/definitions/SubPipeBlueprint" + }, + "title": "Branches", + "type": "array" + }, + "add_each_output": { + "default": false, + "title": "Add Each Output", + "type": "boolean" + }, + "combined_output": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Combined Output" + } + }, + "required": [ + "description", + "output", + 
"branches" + ], + "title": "PipeParallelBlueprint", + "type": "object" + }, + "PipeSequenceBlueprint": { + "additionalProperties": false, + "properties": { + "type": { + "default": "PipeSequence", + "title": "Type", + "type": "string", + "enum": [ + "PipeSequence" + ] + }, + "description": { + "title": "Description", + "type": "string" + }, + "inputs": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Inputs" + }, + "output": { + "title": "Output", + "type": "string" + }, + "steps": { + "items": { + "$ref": "#/definitions/SubPipeBlueprint" + }, + "title": "Steps", + "type": "array" + } + }, + "required": [ + "description", + "output", + "steps" + ], + "title": "PipeSequenceBlueprint", + "type": "object" + }, + "PromptImageDetail": { + "enum": [ + "high", + "low", + "auto" + ], + "title": "PromptImageDetail", + "type": "string" + }, + "PromptingTarget": { + "enum": [ + "openai", + "anthropic", + "mistral", + "gemini", + "fal" + ], + "title": "PromptingTarget", + "type": "string" + }, + "Quality": { + "enum": [ + "low", + "medium", + "high" + ], + "title": "Quality", + "type": "string" + }, + "ReasoningEffort": { + "enum": [ + "none", + "minimal", + "low", + "medium", + "high", + "max" + ], + "title": "ReasoningEffort", + "type": "string" + }, + "SpecialOutcome": { + "enum": [ + "fail", + "continue" + ], + "title": "SpecialOutcome", + "type": "string" + }, + "StructuringMethod": { + "enum": [ + "direct", + "preliminary_text" + ], + "title": "StructuringMethod", + "type": "string" + }, + "SubPipeBlueprint": { + "additionalProperties": false, + "properties": { + "pipe": { + "title": "Pipe", + "type": "string" + }, + "result": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Result" + }, + "nb_output": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Nb Output" + }, + "multiple_output": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Multiple Output" + }, + "batch_over": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Batch Over" + }, + "batch_as": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Batch As" + } + }, + "required": [ + "pipe" + ], + "title": "SubPipeBlueprint", + "type": "object" + }, + "TagStyle": { + "enum": [ + "no_tag", + "ticks", + "xml", + "square_brackets" + ], + "title": "TagStyle", + "type": "string" + }, + "TemplateBlueprint": { + "properties": { + "template": { + "description": "Raw template source", + "title": "Template", + "type": "string" + }, + "templating_style": { + "anyOf": [ + { + "$ref": "#/definitions/TemplatingStyle" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Style of prompting to use (typically for different LLMs)" + }, + "category": { + "$ref": "#/definitions/TemplateCategory", + "description": "Category of the template (could also be HTML, MARKDOWN, MERMAID, etc.), influences template rendering rules" + }, + "extra_context": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Additional context variables for template rendering", + "title": "Extra Context" + } + }, + "required": [ + "template", + "category" + ], + "title": "TemplateBlueprint", + "type": "object" 
+ }, + "TemplateCategory": { + "enum": [ + "basic", + "expression", + "html", + "markdown", + "mermaid", + "llm_prompt", + "img_gen_prompt" + ], + "title": "TemplateCategory", + "type": "string" + }, + "TemplatingStyle": { + "properties": { + "tag_style": { + "$ref": "#/definitions/TagStyle" + }, + "text_format": { + "$ref": "#/definitions/TextFormat", + "default": "plain" + } + }, + "required": [ + "tag_style" + ], + "title": "TemplatingStyle", + "type": "object" + }, + "TextFormat": { + "enum": [ + "plain", + "markdown", + "html", + "json" + ], + "title": "TextFormat", + "type": "string" + } + }, + "$schema": "http://json-schema.org/draft-04/schema#", + "$comment": "Generated from PipelexBundleBlueprint v0.18.0b3. Do not edit manually.", + "x-taplo": { + "initKeys": [ + "domain" + ] + } +} diff --git a/pipelex/language/mthds_schema_generator.py b/pipelex/language/mthds_schema_generator.py new file mode 100644 index 000000000..921b80061 --- /dev/null +++ b/pipelex/language/mthds_schema_generator.py @@ -0,0 +1,275 @@ +"""Generator for JSON Schema from MTHDS blueprint classes. + +Produces a Taplo-compatible JSON Schema (Draft 4) from PipelexBundleBlueprint's +Pydantic v2 model schema. The generated schema enables IDE validation and +autocompletion for .mthds files in the vscode-pipelex extension. +""" + +from __future__ import annotations + +import copy +from typing import TYPE_CHECKING, Any, cast + +if TYPE_CHECKING: + from collections.abc import Callable + +from pipelex.core.bundles.pipelex_bundle_blueprint import PipelexBundleBlueprint +from pipelex.tools.misc.package_utils import get_package_version + +# Fields that are injected at load time, never written by users in .mthds files +_INTERNAL_FIELDS = {"source"} + +# Fields that are technical union discriminators, not user-facing +_PIPE_INTERNAL_FIELDS = {"pipe_category"} + +# Pipe definition names (as they appear in Pydantic schema $defs) +_PIPE_DEFINITION_NAMES = { + "PipeFuncBlueprint", + "PipeImgGenBlueprint", + "PipeComposeBlueprint", + "PipeLLMBlueprint", + "PipeExtractBlueprint", + "PipeBatchBlueprint", + "PipeConditionBlueprint", + "PipeParallelBlueprint", + "PipeSequenceBlueprint", +} + + +def generate_mthds_schema() -> dict[str, Any]: + """Generate a Taplo-compatible JSON Schema for .mthds files. + + Uses PipelexBundleBlueprint.model_json_schema() as the base, then applies + post-processing steps to make it compatible with Taplo (JSON Schema Draft 4) + and match the user-facing MTHDS file format. + + Returns: + A JSON Schema dict ready to be serialized to JSON. + """ + schema = PipelexBundleBlueprint.model_json_schema( + by_alias=True, + mode="validation", + ) + + schema = _remove_internal_fields(schema) + schema = _convert_to_draft4(schema) + schema = _patch_construct_schema(schema) + + return _add_taplo_metadata(schema) + + +def _remove_internal_fields(schema: dict[str, Any]) -> dict[str, Any]: + """Remove fields that users never write in .mthds files. 
+ + - `source` is removed from all definitions (injected at load time) + - `pipe_category` is removed from pipe definitions (union discriminator) + """ + schema = copy.deepcopy(schema) + defs_key = "$defs" if "$defs" in schema else "definitions" + definitions = schema.get(defs_key, {}) + + # Remove 'source' from root properties + root_props = schema.get("properties", {}) + for field_name in _INTERNAL_FIELDS: + root_props.pop(field_name, None) + _remove_from_required(schema, _INTERNAL_FIELDS) + + # Remove internal fields from all definitions + for def_name, def_schema in definitions.items(): + props = def_schema.get("properties", {}) + for field_name in _INTERNAL_FIELDS: + props.pop(field_name, None) + _remove_from_required(def_schema, _INTERNAL_FIELDS) + + # Remove pipe_category only from pipe blueprint definitions + if def_name in _PIPE_DEFINITION_NAMES: + for field_name in _PIPE_INTERNAL_FIELDS: + props.pop(field_name, None) + _remove_from_required(def_schema, _PIPE_INTERNAL_FIELDS) + + return schema + + +def _remove_from_required(schema_obj: dict[str, Any], field_names: set[str]) -> None: + """Remove field names from a schema object's 'required' list.""" + required = schema_obj.get("required") + if required is not None: + schema_obj["required"] = [req for req in required if req not in field_names] + if not schema_obj["required"]: + del schema_obj["required"] + + +def _convert_to_draft4(schema: dict[str, Any]) -> dict[str, Any]: + """Convert JSON Schema from Pydantic's Draft 2020-12 to Draft 4 for Taplo. + + - Renames `$defs` to `definitions` + - Converts `const` to single-value `enum` + - Removes `discriminator` (not in Draft 4) + - Fixes `$ref` paths from `#/$defs/` to `#/definitions/` + - Converts `exclusiveMinimum`/`exclusiveMaximum` from number (Draft 6+) to boolean (Draft 4) + """ + schema = copy.deepcopy(schema) + + # Rename $defs to definitions + if "$defs" in schema: + schema["definitions"] = schema.pop("$defs") + + # Walk the schema tree to apply conversions + _walk_schema(schema, _draft4_visitor) + + return schema + + +def _draft4_visitor(node: dict[str, Any]) -> None: + """Visitor that converts Draft 2020-12 constructs to Draft 4.""" + # Convert const to single-value enum + if "const" in node: + node["enum"] = [node.pop("const")] + + # Remove discriminator (not in Draft 4) + node.pop("discriminator", None) + + # Fix $ref paths + if "$ref" in node: + ref_value = node["$ref"] + if isinstance(ref_value, str) and "#/$defs/" in ref_value: + node["$ref"] = ref_value.replace("#/$defs/", "#/definitions/") + + # Convert exclusiveMinimum/exclusiveMaximum from Draft 6+ (number) to Draft 4 (boolean) + # Draft 6+: "exclusiveMinimum": 0 β†’ Draft 4: "minimum": 0, "exclusiveMinimum": true + if "exclusiveMinimum" in node and not isinstance(node["exclusiveMinimum"], bool): + node["minimum"] = node["exclusiveMinimum"] + node["exclusiveMinimum"] = True + if "exclusiveMaximum" in node and not isinstance(node["exclusiveMaximum"], bool): + node["maximum"] = node["exclusiveMaximum"] + node["exclusiveMaximum"] = True + + +def _patch_construct_schema(schema: dict[str, Any]) -> dict[str, Any]: + """Patch ConstructBlueprint definition to match user-facing MTHDS format. + + In .mthds files, construct fields are written directly at root level: + [pipe.my_pipe.construct] + field_a = "value" + field_b = { from = "var_name" } + + But the Pydantic model wraps them in a `fields` dict. 
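# Worked example of the Draft 4 conversion above. The node is contrived so it
# exercises all three rewrites at once; real schema nodes rarely combine them.
from pipelex.language.mthds_schema_generator import _draft4_visitor

node = {"const": "PipeLLM", "exclusiveMinimum": 0, "$ref": "#/$defs/LLMSetting"}
_draft4_visitor(node)
assert node == {
    "enum": ["PipeLLM"],                 # const becomes a single-value enum
    "minimum": 0,                        # numeric exclusiveMinimum splits into
    "exclusiveMinimum": True,            # minimum + boolean flag (Draft 4 form)
    "$ref": "#/definitions/LLMSetting",  # $defs path rewritten to definitions
}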
This patch replaces + the ConstructBlueprint definition with one that uses `additionalProperties` + to accept arbitrary field names with field-value schemas. + + Also replaces ConstructFieldBlueprint with a user-facing schema that accepts + the raw MTHDS formats: raw values, {from: str}, {template: str}, or nested constructs. + """ + schema = copy.deepcopy(schema) + definitions = schema.get("definitions", {}) + + # Build the user-facing field value schema (what goes in each construct field) + construct_field_schema = _build_construct_field_schema() + + # Replace ConstructBlueprint with MTHDS-format schema + if "ConstructBlueprint" in definitions: + definitions["ConstructBlueprint"] = { + "title": "ConstructBlueprint", + "description": "Construct section defining how to compose a StructuredContent from working memory fields.", + "type": "object", + "additionalProperties": construct_field_schema, + "minProperties": 1, + } + + # Replace ConstructFieldBlueprint with user-facing schema + if "ConstructFieldBlueprint" in definitions: + definitions["ConstructFieldBlueprint"] = { + "title": "ConstructFieldBlueprint", + **construct_field_schema, + } + + return schema + + +def _build_construct_field_schema() -> dict[str, Any]: + """Build a JSON Schema for a construct field value as written in MTHDS files. + + Matches the parsing logic in ConstructFieldBlueprint.make_from_raw(): + - Raw values (string, number, boolean, array): fixed value + - {from: str}: variable reference from working memory + - {from: str, list_to_dict_keyed_by: str}: variable ref with dict conversion + - {template: str}: Jinja2 template + - Object with other keys: nested construct (recursive) + """ + return { + "anyOf": [ + {"type": "string", "description": "Fixed string value"}, + {"type": "number", "description": "Fixed numeric value"}, + {"type": "boolean", "description": "Fixed boolean value"}, + {"type": "array", "description": "Fixed array value"}, + { + "type": "object", + "description": "Variable reference from working memory", + "properties": { + "from": {"type": "string", "description": "Path to variable in working memory"}, + "list_to_dict_keyed_by": { + "type": "string", + "description": "Convert list to dict keyed by this attribute", + }, + }, + "required": ["from"], + "additionalProperties": False, + }, + { + "type": "object", + "description": "Jinja2 template string", + "properties": { + "template": {"type": "string", "description": "Jinja2 template string (with $ preprocessing)"}, + }, + "required": ["template"], + "additionalProperties": False, + }, + { + "type": "object", + "description": "Nested construct", + "additionalProperties": {"$ref": "#/definitions/ConstructFieldBlueprint"}, + "minProperties": 1, + }, + ], + } + + +def _add_taplo_metadata(schema: dict[str, Any]) -> dict[str, Any]: + """Add Taplo-specific metadata and JSON Schema Draft 4 header. + + - Sets $schema to Draft 4 + - Adds title and version comment + - Adds x-taplo.initKeys on the root schema for better IDE experience + """ + schema = copy.deepcopy(schema) + + version = get_package_version() + + schema["$schema"] = "http://json-schema.org/draft-04/schema#" + schema["title"] = "MTHDS File Schema" + schema["$comment"] = f"Generated from PipelexBundleBlueprint v{version}. Do not edit manually." 
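# Self-check sketch for the construct-field schema (assumes the third-party
# `jsonschema` package; the sample values are illustrative).
from jsonschema import Draft4Validator

from pipelex.language.mthds_schema_generator import _build_construct_field_schema

field_schema = _build_construct_field_schema()
# Register the schema under its own name so the recursive "nested construct"
# $ref resolves in this standalone check:
root = {"definitions": {"ConstructFieldBlueprint": field_schema}, **field_schema}
validator = Draft4Validator(root)
samples = ["fixed", 42, True, {"from": "invoice.total"}, {"template": "Hi $name"}, {"total": {"from": "invoice.total"}}]
for sample in samples:
    validator.validate(sample)  # raises jsonschema.ValidationError on mismatch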
+ + # x-taplo.initKeys suggests which keys to auto-insert when creating a new .mthds file + schema["x-taplo"] = { + "initKeys": ["domain"], + } + + return schema + + +def _walk_schema(node: dict[str, Any] | list[Any] | Any, visitor: Callable[[dict[str, Any]], None]) -> None: + """Recursively walk a JSON Schema tree, calling visitor on each dict node. + + Args: + node: Current node in the schema tree + visitor: Callable that receives each dict node for in-place modification + """ + if isinstance(node, dict): + typed_node = cast("dict[str, Any]", node) + visitor(typed_node) + for child_value in typed_node.values(): + _walk_schema(child_value, visitor) + elif isinstance(node, list): + typed_list = cast("list[Any]", node) + for child_item in typed_list: + _walk_schema(child_item, visitor) diff --git a/pipelex/language/plx_config.py b/pipelex/language/plx_config.py deleted file mode 100644 index 639fb40cc..000000000 --- a/pipelex/language/plx_config.py +++ /dev/null @@ -1,28 +0,0 @@ -from pipelex.system.configuration.config_model import ConfigModel - - -class PlxConfigStrings(ConfigModel): - prefer_literal: bool - force_multiline: bool - length_limit_to_multiline: int - ensure_trailing_newline: bool - ensure_leading_blank_line: bool - - -class PlxConfigInlineTables(ConfigModel): - spaces_inside_curly_braces: bool - - -class PlxConfigForConcepts(ConfigModel): - structure_field_ordering: list[str] - - -class PlxConfigForPipes(ConfigModel): - field_ordering: list[str] - - -class PlxConfig(ConfigModel): - strings: PlxConfigStrings - inline_tables: PlxConfigInlineTables - concepts: PlxConfigForConcepts - pipes: PlxConfigForPipes diff --git a/pipelex/libraries/concept/concept_library.py b/pipelex/libraries/concept/concept_library.py index 1ec371cc7..32a452bfa 100644 --- a/pipelex/libraries/concept/concept_library.py +++ b/pipelex/libraries/concept/concept_library.py @@ -1,3 +1,5 @@ +from typing import Any, Callable + from pydantic import Field, RootModel, model_validator from typing_extensions import override @@ -7,6 +9,7 @@ from pipelex.core.concepts.native.concept_native import NativeConceptCode from pipelex.core.concepts.validation import is_concept_ref_valid, validate_concept_ref_or_code from pipelex.core.domains.domain import SpecialDomain +from pipelex.core.qualified_ref import QualifiedRef from pipelex.libraries.concept.concept_library_abstract import ConceptLibraryAbstract from pipelex.libraries.concept.exceptions import ConceptLibraryError from pipelex.types import Self @@ -17,10 +20,22 @@ class ConceptLibrary(RootModel[ConceptLibraryRoot], ConceptLibraryAbstract): root: ConceptLibraryRoot = Field(default_factory=dict) + @override + def model_post_init(self, _context: Any) -> None: + self._concept_resolver: Callable[[str], Concept | None] | None = None + + def set_concept_resolver(self, resolver: Callable[[str], Concept | None]) -> None: + """Set a resolver callback for cross-package concept lookups. 
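# Minimal sketch of the resolver hook, assuming a stand-in resolver that never
# finds anything; in this patch, LibraryManager wires library.resolve_concept
# into this hook once dependency libraries are loaded.
from pipelex.libraries.concept.concept_library import ConceptLibrary

lib = ConceptLibrary()
lib.set_concept_resolver(lambda ref: None)  # stand-in: every lookup misses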
+ + Args: + resolver: A callable that takes a concept ref and returns the Concept or None + """ + self._concept_resolver = resolver + @model_validator(mode="after") def validation_static(self): for concept in self.root.values(): - if concept.refines and concept.refines not in self.root: + if concept.refines and not QualifiedRef.has_cross_package_prefix(concept.refines) and concept.refines not in self.root: msg = f"Concept '{concept.code}' refines '{concept.refines}' but no concept with the code '{concept.refines}' exists" raise ConceptLibraryError(msg) return self @@ -82,7 +97,12 @@ def remove_concepts_by_concept_refs(self, concept_refs: list[str]) -> None: @override def is_compatible(self, tested_concept: Concept, wanted_concept: Concept, strict: bool = False) -> bool: - return Concept.are_concept_compatible(concept_1=tested_concept, concept_2=wanted_concept, strict=strict) + return Concept.are_concept_compatible( + concept_1=tested_concept, + concept_2=wanted_concept, + strict=strict, + concept_resolver=self._concept_resolver, + ) def get_optional_concept(self, concept_ref: str) -> Concept | None: return self.root.get(concept_ref) @@ -90,8 +110,18 @@ def get_optional_concept(self, concept_ref: str) -> Concept | None: @override def get_required_concept(self, concept_ref: str) -> Concept: """`concept_ref` can have the domain or not. If it doesn't have the domain, it is assumed to be native. - If it is not native and doesnt have a domain, it should raise an error + If it is not native and doesn't have a domain, it should raise an error. + Cross-package refs (alias->domain.Code) are looked up directly by key. + """ + # Cross-package refs bypass format validation (direct dict lookup) + if QualifiedRef.has_cross_package_prefix(concept_ref): + the_concept = self.root.get(concept_ref) + if not the_concept: + alias, remainder = QualifiedRef.split_cross_package_ref(concept_ref) + msg = f"Cross-package concept '{remainder}' from dependency '{alias}' not found in the library. Is the dependency loaded?" + raise ConceptLibraryError(msg) + return the_concept + if not is_concept_ref_valid(concept_ref=concept_ref): msg = f"Concept string '{concept_ref}' is not a valid concept string" raise ConceptLibraryError(msg) @@ -122,6 +152,10 @@ def get_required_concept_from_concept_ref_or_code(self, concept_ref_or_code: str msg = f"Could not validate concept string or code '{concept_ref_or_code}': {exc}" raise ConceptLibraryError(msg) from exc + # Cross-package refs are looked up via get_required_concept which handles them + if QualifiedRef.has_cross_package_prefix(concept_ref_or_code): + return self.get_required_concept(concept_ref=concept_ref_or_code) + if NativeConceptCode.is_native_concept_ref_or_code(concept_ref_or_code=concept_ref_or_code): native_concept_ref = NativeConceptCode.get_validated_native_concept_ref(concept_ref_or_code=concept_ref_or_code) return self.get_native_concept(native_concept=NativeConceptCode(native_concept_ref.split(".")[1])) @@ -154,5 +188,18 @@ def get_required_concept_from_concept_ref_or_code(self, concept_ref_or_code: str raise ConceptLibraryConceptNotFoundError(msg) return found_concepts[0] + def add_dependency_concept(self, alias: str, concept: Concept) -> None: + """Add a concept from a dependency package with an aliased key.
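# Hypothetical illustration of the aliased-key scheme used by this method
# (the alias and concept ref are made up):
alias = "invoicing"                # dependency alias from the manifest
concept_ref = "billing.Invoice"    # the dependency concept's concept_ref
key = f"{alias}->{concept_ref}"    # mirrors the f-string in the method body
assert key == "invoicing->billing.Invoice"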
+ + Args: + alias: The dependency alias + concept: The concept to add + """ + key = f"{alias}->{concept.concept_ref}" + if key in self.root: + msg = f"Dependency concept '{key}' already exists in the library" + raise ConceptLibraryError(msg) + self.root[key] = concept + def is_concept_exists(self, concept_ref: str) -> bool: return concept_ref in self.root diff --git a/pipelex/libraries/library.py b/pipelex/libraries/library.py index c1e9faf13..8e75668fc 100644 --- a/pipelex/libraries/library.py +++ b/pipelex/libraries/library.py @@ -1,8 +1,11 @@ from pathlib import Path +from typing import TYPE_CHECKING from pydantic import BaseModel, Field +from pipelex import log from pipelex.base_exceptions import PipelexUnexpectedError +from pipelex.core.qualified_ref import QualifiedRef from pipelex.libraries.concept.concept_library import ConceptLibrary from pipelex.libraries.concept.exceptions import ConceptLibraryError from pipelex.libraries.domain.domain_library import DomainLibrary @@ -12,6 +15,9 @@ from pipelex.pipe_controllers.pipe_controller import PipeController from pipelex.tools.typing.pydantic_utils import empty_list_factory_of +if TYPE_CHECKING: + from pipelex.core.concepts.concept import Concept + class Library(BaseModel): """A Library bundles together domain, concept, and pipe libraries for a specific context. @@ -27,7 +33,8 @@ class Library(BaseModel): domain_library: DomainLibrary concept_library: ConceptLibrary pipe_library: PipeLibrary - loaded_plx_paths: list[Path] = Field(default_factory=empty_list_factory_of(Path)) + loaded_mthds_paths: list[Path] = Field(default_factory=empty_list_factory_of(Path)) + dependency_libraries: dict[str, "Library"] = Field(default_factory=dict) def get_domain_library(self) -> DomainLibrary: return self.domain_library @@ -38,11 +45,47 @@ def get_concept_library(self) -> ConceptLibrary: def get_pipe_library(self) -> PipeLibrary: return self.pipe_library + def get_dependency_library(self, alias: str) -> "Library | None": + """Get a child library for a dependency by alias. + + Args: + alias: The dependency alias + + Returns: + The child Library, or None if not found + """ + return self.dependency_libraries.get(alias) + + def resolve_concept(self, concept_ref: str) -> "Concept | None": + """Resolve a concept ref, routing cross-package refs through child libraries. + + For cross-package refs (containing '->'), splits into alias and remainder, + then looks up the concept in the corresponding child library's concept_library. + For local refs, looks up in the main concept_library. + + Args: + concept_ref: A concept ref, possibly cross-package (e.g. 
"alias->domain.Code") + + Returns: + The resolved Concept, or None if not found + """ + if QualifiedRef.has_cross_package_prefix(concept_ref): + alias, remainder = QualifiedRef.split_cross_package_ref(concept_ref) + child_library = self.dependency_libraries.get(alias) + if child_library is None: + return None + return child_library.concept_library.get_optional_concept(concept_ref=remainder) + return self.concept_library.get_optional_concept(concept_ref=concept_ref) + def teardown(self) -> None: + # Tear down child libraries first + for child_library in self.dependency_libraries.values(): + child_library.teardown() + self.dependency_libraries = {} self.pipe_library.teardown() self.concept_library.teardown() self.domain_library.teardown() - self.loaded_plx_paths = [] + self.loaded_mthds_paths = [] def validate_library(self) -> None: self.validate_domain_library_with_libraries() @@ -69,6 +112,9 @@ def validate_pipe_library_with_libraries(self) -> None: # Validate pipe dependencies exist for pipe controllers if isinstance(pipe, PipeController): for sub_pipe_code in pipe.pipe_dependencies(): + # Cross-package refs that aren't loaded are validated at package level, not library level + if QualifiedRef.has_cross_package_prefix(sub_pipe_code) and self.pipe_library.get_optional_pipe(sub_pipe_code) is None: + continue try: self.pipe_library.get_required_pipe(pipe_code=sub_pipe_code) except PipeLibraryError as pipe_error: @@ -76,10 +122,55 @@ def validate_pipe_library_with_libraries(self) -> None: raise LibraryError(msg) from pipe_error for pipe in self.pipe_library.root.values(): + # Skip full validation for pipe controllers with unresolved cross-package dependencies + if isinstance(pipe, PipeController) and self._has_unresolved_cross_package_deps(pipe): + continue pipe.validate_with_libraries() + def _has_unresolved_cross_package_deps(self, pipe: PipeController) -> bool: + """Check if a pipe controller has cross-package dependencies that aren't loaded. + + A cross-package dep is only "unresolved" if the alias has no child library + AND the pipe isn't found in the main pipe library. + + Args: + pipe: The pipe controller to check + + Returns: + True if the pipe has unresolved cross-package dependencies + """ + for dep_code in pipe.pipe_dependencies(): + if QualifiedRef.has_cross_package_prefix(dep_code): + # Check main pipe library first (aliased entries) + if self.pipe_library.get_optional_pipe(dep_code) is not None: + continue + # Check if the alias has a child library + alias, _remainder = QualifiedRef.split_cross_package_ref(dep_code) + if alias not in self.dependency_libraries: + return True + return False + def validate_concept_library_with_libraries(self) -> None: - pass + """Validate cross-package concept refines have their targets available. + + For each concept with a cross-package refines, verify the target exists + in the corresponding child library via resolve_concept(). 
+ """ + for concept in self.concept_library.root.values(): + if concept.refines and QualifiedRef.has_cross_package_prefix(concept.refines): + resolved = self.resolve_concept(concept.refines) + if resolved is None: + alias, remainder = QualifiedRef.split_cross_package_ref(concept.refines) + if alias in self.dependency_libraries: + msg = ( + f"Concept '{concept.concept_ref}' refines cross-package concept '{concept.refines}' " + f"but '{remainder}' was not found in dependency '{alias}'" + ) + raise LibraryError(msg) + log.verbose( + f"Concept '{concept.concept_ref}' refines cross-package concept '{concept.refines}' " + f"from unloaded dependency '{alias}', skipping validation" + ) def validate_domain_library_with_libraries(self) -> None: pass diff --git a/pipelex/libraries/library_manager.py b/pipelex/libraries/library_manager.py index 7f5d697dc..319d50aca 100644 --- a/pipelex/libraries/library_manager.py +++ b/pipelex/libraries/library_manager.py @@ -17,6 +17,11 @@ from pipelex.core.domains.domain_factory import DomainFactory from pipelex.core.interpreter.exceptions import PipelexInterpreterError from pipelex.core.interpreter.interpreter import PipelexInterpreter +from pipelex.core.packages.dependency_resolver import ResolvedDependency, resolve_all_dependencies +from pipelex.core.packages.discovery import find_package_manifest +from pipelex.core.packages.exceptions import DependencyResolveError, ManifestError +from pipelex.core.packages.manifest import MTHDS_STANDARD_VERSION, MthdsPackageManifest +from pipelex.core.packages.visibility import PackageVisibilityChecker, check_visibility_for_blueprints from pipelex.core.pipes.pipe_abstract import PipeAbstract from pipelex.core.pipes.pipe_factory import PipeFactory from pipelex.core.stuffs.structured_content import StructuredContent @@ -30,11 +35,12 @@ from pipelex.libraries.library_factory import LibraryFactory from pipelex.libraries.library_manager_abstract import LibraryManagerAbstract from pipelex.libraries.library_utils import ( - get_pipelex_plx_files_from_dirs, + get_pipelex_mthds_files_from_dirs, ) from pipelex.libraries.pipe.exceptions import PipeLibraryError from pipelex.system.registries.class_registry_utils import ClassRegistryUtils from pipelex.system.registries.func_registry_utils import FuncRegistryUtils +from pipelex.tools.misc.semver import SemVerError, parse_constraint, parse_version, version_satisfies if TYPE_CHECKING: from pipelex.core.concepts.concept import Concept @@ -46,7 +52,7 @@ class LibraryManager(LibraryManagerAbstract): def __init__(self): # UNTITLED library is the fallback library for all others self._libraries: dict[str, Library] = {} - self._pipe_source_map: dict[str, Path] = {} # pipe_code -> source .plx file + self._pipe_source_map: dict[str, Path] = {} # pipe_code -> source .mthds file ############################################################ # Manager lifecycle @@ -122,7 +128,7 @@ def get_pipe_source(self, pipe_code: str) -> Path | None: pipe_code: The pipe code to look up. Returns: - Path to the .plx file the pipe was loaded from, or None if unknown. + Path to the .mthds file the pipe was loaded from, or None if unknown. 
""" return self._pipe_source_map.get(pipe_code) @@ -146,25 +152,25 @@ def load_libraries( library_dirs = [] all_dirs: list[Path] = [] - all_plx_paths: list[Path] = [] + all_mthds_paths: list[Path] = [] all_dirs.extend(library_dirs) - all_plx_paths.extend(get_pipelex_plx_files_from_dirs(set(library_dirs))) + all_mthds_paths.extend(get_pipelex_mthds_files_from_dirs(set(library_dirs))) if library_file_paths: - all_plx_paths.extend(library_file_paths) + all_mthds_paths.extend(library_file_paths) # Combine and deduplicate seen_absolute_paths: set[str] = set() - valid_plx_paths: list[Path] = [] - for plx_path in all_plx_paths: + valid_mthds_paths: list[Path] = [] + for mthds_path in all_mthds_paths: try: - absolute_path = str(plx_path.resolve()) + absolute_path = str(mthds_path.resolve()) except (OSError, RuntimeError): # For paths that can't be resolved (e.g., in zipped packages), use string representation - absolute_path = str(plx_path) + absolute_path = str(mthds_path) if absolute_path not in seen_absolute_paths: - valid_plx_paths.append(plx_path) + valid_mthds_paths.append(mthds_path) seen_absolute_paths.add(absolute_path) # Import modules and register in global registries @@ -188,9 +194,9 @@ def load_libraries( ) log.verbose(f"Auto-registered {num_registered} StructuredContent classes from loaded modules") - # Load PLX files into the specific library - log.verbose(f"Loading plx files from: {[str(p) for p in valid_plx_paths]}") - return self._load_plx_files_into_library(library_id=library_id, valid_plx_paths=valid_plx_paths) + # Load MTHDS files into the specific library + log.verbose(f"Loading MTHDS files from: {[str(p) for p in valid_mthds_paths]}") + return self._load_mthds_files_into_library(library_id=library_id, valid_mthds_paths=valid_mthds_paths) @override def load_libraries_concepts_only( @@ -207,8 +213,8 @@ def load_libraries_concepts_only( Args: library_id: The ID of the library to load into - library_dirs: List of directories containing PLX files - library_file_paths: List of specific PLX file paths to load + library_dirs: List of directories containing MTHDS files + library_file_paths: List of specific MTHDS file paths to load Returns: List of all concepts that were loaded @@ -222,25 +228,25 @@ def load_libraries_concepts_only( library_dirs = [] all_dirs: list[Path] = [] - all_plx_paths: list[Path] = [] + all_mthds_paths: list[Path] = [] all_dirs.extend(library_dirs) - all_plx_paths.extend(get_pipelex_plx_files_from_dirs(set(library_dirs))) + all_mthds_paths.extend(get_pipelex_mthds_files_from_dirs(set(library_dirs))) if library_file_paths: - all_plx_paths.extend(library_file_paths) + all_mthds_paths.extend(library_file_paths) # Combine and deduplicate seen_absolute_paths: set[str] = set() - valid_plx_paths: list[Path] = [] - for plx_path in all_plx_paths: + valid_mthds_paths: list[Path] = [] + for mthds_path in all_mthds_paths: try: - absolute_path = str(plx_path.resolve()) + absolute_path = str(mthds_path.resolve()) except (OSError, RuntimeError): # For paths that can't be resolved (e.g., in zipped packages), use string representation - absolute_path = str(plx_path) + absolute_path = str(mthds_path) if absolute_path not in seen_absolute_paths: - valid_plx_paths.append(plx_path) + valid_mthds_paths.append(mthds_path) seen_absolute_paths.add(absolute_path) # Import modules and register in global registries @@ -260,19 +266,19 @@ def load_libraries_concepts_only( ) log.debug(f"Auto-registered {num_registered} StructuredContent classes from loaded modules") - # Load PLX files as 
concepts only (no pipes) - log.debug(f"Loading concepts only from plx files: {[str(p) for p in valid_plx_paths]}") + # Load MTHDS files as concepts only (no pipes) + log.debug(f"Loading concepts only from MTHDS files: {[str(p) for p in valid_mthds_paths]}") library = self.get_library(library_id=library_id) all_concepts: list[Concept] = [] - for plx_path in valid_plx_paths: + for mthds_path in valid_mthds_paths: # Track loaded path (resolve if possible) try: - resolved_path = plx_path.resolve() + resolved_path = mthds_path.resolve() except (OSError, RuntimeError): - resolved_path = plx_path - library.loaded_plx_paths.append(resolved_path) + resolved_path = mthds_path + library.loaded_mthds_paths.append(resolved_path) - blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=plx_path) + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=mthds_path) concepts = self.load_concepts_only_from_blueprints(library_id=library_id, blueprints=[blueprint]) all_concepts.extend(concepts) @@ -284,7 +290,7 @@ def load_from_blueprints(self, library_id: str, blueprints: list[PipelexBundleBl Args: library_id: The ID of the library to load into - blueprints: List of parsed PLX blueprints to load + blueprints: List of parsed MTHDS blueprints to load Returns: List of all pipes that were loaded @@ -370,7 +376,7 @@ def load_concepts_only_from_blueprints( Args: library_id: The ID of the library to load into - blueprints: List of parsed PLX blueprints to load + blueprints: List of parsed MTHDS blueprints to load Returns: List of all concepts that were loaded @@ -418,7 +424,7 @@ def _load_concepts_from_blueprints( later by _rebuild_models_with_forward_refs(). Args: - blueprints: List of parsed PLX blueprints to load + blueprints: List of parsed MTHDS blueprints to load Returns: List of loaded concepts @@ -491,52 +497,335 @@ def _load_concepts_from_blueprints( # Private helper methods ############################################################ - def _load_plx_files_into_library(self, library_id: str, valid_plx_paths: list[Path]) -> list[PipeAbstract]: - """Load PLX files into a specific library. + def _load_mthds_files_into_library(self, library_id: str, valid_mthds_paths: list[Path]) -> list[PipeAbstract]: + """Load MTHDS files into a specific library. This method: - 1. Parses blueprints from PLX files - 2. Loads blueprints into the specified library + 1. Parses blueprints from MTHDS files + 2. Finds and loads dependency packages (if manifest has dependencies with local paths) + 3. 
Loads blueprints into the specified library Args: library_id: The ID of the library to load into - valid_plx_paths: List of PLX file paths to load + valid_mthds_paths: List of MTHDS file paths to load """ blueprints: list[PipelexBundleBlueprint] = [] - for plx_file_path in valid_plx_paths: + for mthds_file_path in valid_mthds_paths: try: - blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=plx_file_path) - blueprint.source = str(plx_file_path) + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=mthds_file_path) + blueprint.source = str(mthds_file_path) except FileNotFoundError as file_not_found_error: - msg = f"Could not find PLX bundle at '{plx_file_path}'" + msg = f"Could not find MTHDS bundle at '{mthds_file_path}'" raise LibraryLoadingError(msg) from file_not_found_error except PipelexInterpreterError as interpreter_error: # Forward BLUEPRINT validation errors from interpreter - msg = f"Could not load PLX bundle from '{plx_file_path}' because of: {interpreter_error.message}" + msg = f"Could not load MTHDS bundle from '{mthds_file_path}' because of: {interpreter_error.message}" raise LibraryLoadingError( message=msg, blueprint_validation_errors=interpreter_error.validation_errors, ) from interpreter_error blueprints.append(blueprint) + # Find manifest and run package visibility validation + manifest = self._check_package_visibility(blueprints=blueprints, mthds_paths=valid_mthds_paths) + + # Warn if the package requires a newer MTHDS standard version + if manifest is not None and manifest.mthds_version is not None: + self._warn_if_mthds_version_unsatisfied( + mthds_version_constraint=manifest.mthds_version, + package_address=manifest.address, + ) + + # Load dependency packages if manifest has local-path dependencies + if manifest is not None and manifest.dependencies: + package_root = self._find_package_root(mthds_paths=valid_mthds_paths) + if package_root is not None: + self._load_dependency_packages( + library_id=library_id, + manifest=manifest, + package_root=package_root, + ) + # Store resolved absolute paths for duplicate detection in the library library = self.get_library(library_id=library_id) - for plx_file_path in valid_plx_paths: + for mthds_file_path in valid_mthds_paths: try: - resolved_path = plx_file_path.resolve() + resolved_path = mthds_file_path.resolve() except (OSError, RuntimeError): - resolved_path = plx_file_path - library.loaded_plx_paths.append(resolved_path) + resolved_path = mthds_file_path + library.loaded_mthds_paths.append(resolved_path) try: return self.load_from_blueprints(library_id=library_id, blueprints=blueprints) except ValidationError as validation_error: - validation_error_msg = report_validation_error(category="plx", validation_error=validation_error) - msg = f"Could not load blueprints from {[str(pth) for pth in valid_plx_paths]} because of: {validation_error_msg}" + validation_error_msg = report_validation_error(category="mthds", validation_error=validation_error) + msg = f"Could not load blueprints from {[str(pth) for pth in valid_mthds_paths]} because of: {validation_error_msg}" raise LibraryError( message=msg, ) from validation_error + def _warn_if_mthds_version_unsatisfied( + self, + mthds_version_constraint: str, + package_address: str, + ) -> None: + """Emit a warning if the current MTHDS standard version does not satisfy the package's constraint.""" + try: + constraint = parse_constraint(mthds_version_constraint) + current_version = parse_version(MTHDS_STANDARD_VERSION) + except SemVerError as 
exc: + log.warning(f"Could not parse mthds_version constraint '{mthds_version_constraint}' for package '{package_address}': {exc}") + return + + if not version_satisfies(current_version, constraint): + log.warning( + f"Package '{package_address}' requires MTHDS standard version " + f"'{mthds_version_constraint}', but the current version is " + f"'{MTHDS_STANDARD_VERSION}'. Some features may not work correctly." + ) + + def _check_package_visibility( + self, + blueprints: list[PipelexBundleBlueprint], + mthds_paths: list[Path], + ) -> MthdsPackageManifest | None: + """Check package visibility if a METHODS.toml manifest exists. + + Walks up from the first bundle path to find a METHODS.toml manifest. + If found, validates all cross-domain pipe references against the exports. + + Args: + blueprints: The parsed bundle blueprints + mthds_paths: The MTHDS file paths that were loaded + + Returns: + The manifest if found, or None + """ + if not mthds_paths: + return None + + # Try to find a manifest from the first bundle path + try: + manifest = find_package_manifest(mthds_paths[0]) + except ManifestError as exc: + log.warning(f"Could not parse METHODS.toml: {exc.message}") + # Still enforce reserved domains even when manifest is unparseable + checker = PackageVisibilityChecker(manifest=None, bundles=blueprints) + reserved_errors = checker.validate_reserved_domains() + if reserved_errors: + error_messages = [err.message for err in reserved_errors] + joined_errors = "\n - ".join(error_messages) + msg = f"Reserved domain violations found:\n - {joined_errors}" + raise LibraryLoadingError(msg) from exc + return None + + if manifest is None: + # Still enforce reserved domains even for standalone bundles + checker = PackageVisibilityChecker(manifest=None, bundles=blueprints) + reserved_errors = checker.validate_reserved_domains() + if reserved_errors: + error_messages = [err.message for err in reserved_errors] + joined_errors = "\n - ".join(error_messages) + msg = f"Reserved domain violations found:\n - {joined_errors}" + raise LibraryLoadingError(msg) + return None + + visibility_errors = check_visibility_for_blueprints(manifest=manifest, blueprints=blueprints) + if visibility_errors: + error_messages = [err.message for err in visibility_errors] + joined_errors = "\n - ".join(error_messages) + msg = f"Package visibility violations found:\n - {joined_errors}" + raise LibraryLoadingError(msg) + + return manifest + + def _find_package_root(self, mthds_paths: list[Path]) -> Path | None: + """Find the package root directory by walking up from the first .mthds file. + + The package root is the directory containing METHODS.toml. + + Args: + mthds_paths: The MTHDS file paths + + Returns: + The package root path, or None + """ + if not mthds_paths: + return None + + current = mthds_paths[0].parent.resolve() + while True: + manifest_path = current / "METHODS.toml" + if manifest_path.is_file(): + return current + + git_dir = current / ".git" + if git_dir.exists(): + return None + + parent = current.parent + if parent == current: + return None + current = parent + + def _load_dependency_packages( + self, + library_id: str, + manifest: MthdsPackageManifest, + package_root: Path, + ) -> None: + """Load dependency packages into the library. + + Resolves local-path dependencies, parses their blueprints, and loads + their concepts and exported pipes into isolated child libraries. + Aliased entries are also added to the main library for backward-compatible lookups. 
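# Hedged sketch of the version gate above, reusing the same semver helpers;
# the constraint and version strings are assumptions, not values from the repo.
from pipelex.tools.misc.semver import parse_constraint, parse_version, version_satisfies

constraint = parse_constraint(">=0.18.0")  # e.g. a manifest's mthds_version field
current = parse_version("0.18.0")
if not version_satisfies(current, constraint):
    print("warning: package requires a newer MTHDS standard version")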
+ + Args: + library_id: The library to load into + manifest: The consuming package's manifest + package_root: The root directory of the consuming package + """ + try: + resolved_deps = resolve_all_dependencies(manifest=manifest, package_root=package_root) + except DependencyResolveError as exc: + msg = f"Failed to resolve dependencies: {exc}" + raise LibraryLoadingError(msg) from exc + + library = self.get_library(library_id=library_id) + + for resolved_dep in resolved_deps: + self._load_single_dependency( + library=library, + resolved_dep=resolved_dep, + ) + + # Wire concept resolver after all deps are loaded so cross-package + # refinement checks can traverse into child libraries + library.concept_library.set_concept_resolver(library.resolve_concept) + + def _load_single_dependency( + self, + library: Library, + resolved_dep: ResolvedDependency, + ) -> None: + """Load a single resolved dependency into an isolated child library. + + Creates a child Library for the dependency, loads domains/concepts/pipes + into it, registers it in library.dependency_libraries, and adds aliased + entries to the main library for backward-compatible cross-package lookups. + + Args: + library: The main library to load into + resolved_dep: The resolved dependency info + """ + alias = resolved_dep.alias + + # Parse dependency blueprints + dep_blueprints: list[PipelexBundleBlueprint] = [] + for mthds_path in resolved_dep.mthds_files: + try: + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=mthds_path) + blueprint.source = str(mthds_path) + except (FileNotFoundError, PipelexInterpreterError) as exc: + log.warning(f"Could not parse dependency '{alias}' bundle '{mthds_path}': {exc}") + continue + dep_blueprints.append(blueprint) + + if not dep_blueprints: + log.warning(f"No valid blueprints found for dependency '{alias}'") + return + + # Warn if the dependency requires a newer MTHDS standard version + if resolved_dep.manifest is not None and resolved_dep.manifest.mthds_version is not None: + self._warn_if_mthds_version_unsatisfied( + mthds_version_constraint=resolved_dep.manifest.mthds_version, + package_address=resolved_dep.address, + ) + + # Create isolated child library for this dependency + child_library = LibraryFactory.make_empty() + + # Load domains into child library + all_domains: list[Domain] = [] + for blueprint in dep_blueprints: + domain = DomainFactory.make_from_blueprint( + blueprint=DomainBlueprint( + source=blueprint.source, + code=blueprint.domain, + description=blueprint.description or "", + system_prompt=blueprint.system_prompt, + ), + ) + all_domains.append(domain) + child_library.domain_library.add_domains(domains=all_domains) + + # Load concepts into child library + dep_concepts = self._load_concepts_from_blueprints(dep_blueprints) + child_library.concept_library.add_concepts(concepts=dep_concepts) + + # Collect main_pipes for auto-export + main_pipes: set[str] = set() + for blueprint in dep_blueprints: + if blueprint.main_pipe: + main_pipes.add(blueprint.main_pipe) + + # Determine if we filter by exports or load all. + # exported_pipe_codes is None when no manifest exists (all pipes public), + # or a set (possibly empty) when a manifest defines exports. 
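# Worked example of the filtering rule implemented just below (names made up):
#   exported_pipe_codes is None                 -> no manifest: load every pipe
#   exported_pipe_codes == set()                -> load only the main_pipes
#   exported_pipe_codes == {"analyze_invoice"},
#   main_pipes == {"main_flow"}                 -> load both of those, skip the rest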
+ if resolved_dep.exported_pipe_codes is None: + # No manifest: all pipes are public, no filtering + has_exports = False + all_exported: set[str] = set() + else: + # Manifest exists: filter to exported pipes + main_pipes + has_exports = True + all_exported = resolved_dep.exported_pipe_codes | main_pipes + + # Temporarily register dep concepts in main library for pipe construction + # (PipeFactory resolves concepts through the hub's current library) + temp_concept_refs: list[str] = [] + for concept in dep_concepts: + if not library.concept_library.is_concept_exists(concept_ref=concept.concept_ref): + library.concept_library.add_new_concept(concept=concept) + temp_concept_refs.append(concept.concept_ref) + + # Load exported pipes into child library, ensuring temp concepts are + # always cleaned up even if an unexpected exception occurs + try: + concept_codes = [concept.code for concept in dep_concepts] + for blueprint in dep_blueprints: + if blueprint.pipe is None: + continue + for pipe_code, pipe_blueprint in blueprint.pipe.items(): + # If manifest has exports, only load exported pipes + if has_exports and pipe_code not in all_exported: + continue + try: + pipe = PipeFactory[PipeAbstract].make_from_blueprint( + domain_code=blueprint.domain, + pipe_code=pipe_code, + blueprint=pipe_blueprint, + concept_codes_from_the_same_domain=concept_codes, + ) + child_library.pipe_library.add_new_pipe(pipe=pipe) + except (PipeLibraryError, ValidationError) as exc: + log.warning(f"Could not load dependency '{alias}' pipe '{pipe_code}': {exc}") + finally: + # Remove temporary concept entries from main library + library.concept_library.remove_concepts_by_concept_refs(concept_refs=temp_concept_refs) + + # Register child library for isolation + library.dependency_libraries[alias] = child_library + + # Add aliased entries to main library for backward-compatible cross-package lookups + for concept in dep_concepts: + library.concept_library.add_dependency_concept(alias=alias, concept=concept) + + for pipe in child_library.pipe_library.get_pipes(): + library.pipe_library.add_dependency_pipe(alias=alias, pipe=pipe) + + log.verbose(f"Loaded dependency '{alias}': {len(dep_concepts)} concepts, pipes from {len(dep_blueprints)} bundles") + def _remove_pipes_from_blueprint(self, blueprint: PipelexBundleBlueprint) -> None: library = self.get_current_library() if blueprint.pipe is not None: diff --git a/pipelex/libraries/library_manager_abstract.py b/pipelex/libraries/library_manager_abstract.py index b8b1abfcd..22893bd3b 100644 --- a/pipelex/libraries/library_manager_abstract.py +++ b/pipelex/libraries/library_manager_abstract.py @@ -42,7 +42,7 @@ def get_pipe_source(self, pipe_code: str) -> Path | None: # noqa: ARG002 pipe_code: The pipe code to look up. Returns: - Path to the .plx file the pipe was loaded from, or None if unknown. + Path to the .mthds file the pipe was loaded from, or None if unknown. 
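+            The base implementation always returns None.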
""" return None @@ -60,7 +60,7 @@ def load_concepts_only_from_blueprints(self, library_id: str, blueprints: list[P Args: library_id: The ID of the library to load into - blueprints: List of parsed PLX blueprints to load + blueprints: List of parsed MTHDS blueprints to load Returns: List of all concepts that were loaded @@ -98,8 +98,8 @@ def load_libraries_concepts_only( Args: library_id: The ID of the library to load into - library_dirs: List of directories containing PLX files - library_file_paths: List of specific PLX file paths to load + library_dirs: List of directories containing MTHDS files + library_file_paths: List of specific MTHDS file paths to load Returns: List of all concepts that were loaded diff --git a/pipelex/libraries/library_utils.py b/pipelex/libraries/library_utils.py index 4af6521f9..a3bc1a8af 100644 --- a/pipelex/libraries/library_utils.py +++ b/pipelex/libraries/library_utils.py @@ -4,25 +4,25 @@ from pipelex import log from pipelex.builder import builder from pipelex.config import get_config -from pipelex.core.interpreter.helpers import is_pipelex_file +from pipelex.core.interpreter.helpers import MTHDS_EXTENSION, is_pipelex_file from pipelex.tools.misc.file_utils import find_files_in_dir from pipelex.types import Traversable -def get_pipelex_plx_files_from_package() -> list[Path]: - """Get all PLX files from the pipelex package using importlib.resources. +def get_pipelex_mthds_files_from_package() -> list[Path]: + """Get all MTHDS files from the pipelex package using importlib.resources. This works reliably whether pipelex is installed as a wheel, from source, or as a relative path import. Returns: - List of Path objects to PLX files in pipelex package + List of Path objects to MTHDS files in pipelex package """ - plx_files: list[Path] = [] + mthds_files: list[Path] = [] pipelex_package = files("pipelex") - def _find_plx_in_traversable(traversable: Traversable, collected: list[Path]) -> None: - """Recursively find .plx files in a Traversable.""" + def _find_mthds_in_traversable(traversable: Traversable, collected: list[Path]) -> None: + """Recursively find .mthds files in a Traversable.""" excluded_dirs = get_config().pipelex.scan_config.excluded_dirs try: if not traversable.is_dir(): @@ -30,19 +30,19 @@ def _find_plx_in_traversable(traversable: Traversable, collected: list[Path]) -> for child in traversable.iterdir(): if child.is_file() and is_pipelex_file(Path(child.name)): - plx_path_str = str(child) - collected.append(Path(plx_path_str)) - log.verbose(f"Found pipelex package PLX file: {plx_path_str}") + mthds_path_str = str(child) + collected.append(Path(mthds_path_str)) + log.verbose(f"Found pipelex package MTHDS file: {mthds_path_str}") elif child.is_dir(): # Skip excluded directories if child.name not in excluded_dirs: - _find_plx_in_traversable(child, collected) + _find_mthds_in_traversable(child, collected) except (PermissionError, OSError) as exc: log.warning(f"Could not access {traversable}: {exc}") - _find_plx_in_traversable(pipelex_package, plx_files) - log.verbose(f"Found {len(plx_files)} PLX files in pipelex package") - return plx_files + _find_mthds_in_traversable(pipelex_package, mthds_files) + log.verbose(f"Found {len(mthds_files)} MTHDS files in pipelex package") + return mthds_files def get_pipelex_package_dir_for_imports() -> Path | None: @@ -62,27 +62,27 @@ def get_pipelex_package_dir_for_imports() -> Path | None: return None -def get_pipelex_plx_files_from_dirs(dirs: set[Path]) -> list[Path]: - """Get all valid Pipelex PLX files from the 
given directories.""" - all_plx_paths: list[Path] = [] +def get_pipelex_mthds_files_from_dirs(dirs: set[Path]) -> list[Path]: + """Get all valid Pipelex MTHDS files from the given directories.""" + all_mthds_paths: list[Path] = [] for dir_path in dirs: if not dir_path.exists(): log.debug(f"Directory does not exist, skipping: {dir_path}") continue - # Find all .plx files in the directory, excluding problematic directories - plx_files = find_files_in_dir( + # Find all .mthds files in the directory, excluding problematic directories + mthds_files = find_files_in_dir( dir_path=str(dir_path), - pattern="*.plx", + pattern=f"*{MTHDS_EXTENSION}", excluded_dirs=list(get_config().pipelex.scan_config.excluded_dirs), force_include_dirs=[str(Path(builder.__file__).parent)], ) # Filter to only include valid Pipelex files - for plx_file in plx_files: - if is_pipelex_file(plx_file): - all_plx_paths.append(plx_file) + for mthds_file in mthds_files: + if is_pipelex_file(mthds_file): + all_mthds_paths.append(mthds_file) else: - log.debug(f"Skipping non-Pipelex PLX file: {plx_file}") - return all_plx_paths + log.debug(f"Skipping non-Pipelex MTHDS file: {mthds_file}") + return all_mthds_paths diff --git a/pipelex/libraries/pipe/pipe_library.py b/pipelex/libraries/pipe/pipe_library.py index 25048f83a..5e7a4f0fa 100644 --- a/pipelex/libraries/pipe/pipe_library.py +++ b/pipelex/libraries/pipe/pipe_library.py @@ -7,6 +7,7 @@ from pipelex import pretty_print from pipelex.core.pipes.pipe_abstract import PipeAbstract +from pipelex.core.qualified_ref import QualifiedRef, QualifiedRefError from pipelex.libraries.pipe.exceptions import PipeLibraryError, PipeNotFoundError from pipelex.libraries.pipe.pipe_library_abstract import PipeLibraryAbstract from pipelex.types import Self @@ -53,13 +54,51 @@ def add_pipes(self, pipes: list[PipeAbstract]): @override def get_optional_pipe(self, pipe_code: str) -> PipeAbstract | None: - return self.root.get(pipe_code) + # Direct lookup first (bare code or exact match) + pipe = self.root.get(pipe_code) + if pipe is not None: + return pipe + # Cross-package: "alias->domain.pipe_code" -> lookup "alias->pipe_code" + if QualifiedRef.has_cross_package_prefix(pipe_code): + alias, remainder = QualifiedRef.split_cross_package_ref(pipe_code) + try: + ref = QualifiedRef.parse(remainder) + except QualifiedRefError: + return None + pipe = self.root.get(f"{alias}->{ref.local_code}") + if pipe is not None and ref.is_qualified and pipe.domain_code != ref.domain_path: + return None + return pipe + # If it's a domain-qualified ref (e.g. "scoring.compute_score"), try the local code + if "." in pipe_code: + try: + ref = QualifiedRef.parse(pipe_code) + except QualifiedRefError: + return None + pipe = self.root.get(ref.local_code) + if pipe is not None and ref.is_qualified and pipe.domain_code != ref.domain_path: + return None + return pipe + return None + + def add_dependency_pipe(self, alias: str, pipe: PipeAbstract) -> None: + """Add a pipe from a dependency package with an aliased key. + + Args: + alias: The dependency alias + pipe: The pipe to add + """ + key = f"{alias}->{pipe.code}" + if key in self.root: + msg = f"Dependency pipe '{key}' already exists in the library" + raise PipeLibraryError(msg) + self.root[key] = pipe @override def get_required_pipe(self, pipe_code: str) -> PipeAbstract: the_pipe = self.get_optional_pipe(pipe_code=pipe_code) if not the_pipe: - msg = f"Pipe '{pipe_code}' not found. Check for typos and make sure it is declared in plx file in an imported package." 
+ msg = f"Pipe '{pipe_code}' not found. Check for typos and make sure it is declared in MTHDS file in an imported package." raise PipeNotFoundError(msg) return the_pipe diff --git a/pipelex/pipe_controllers/parallel/pipe_parallel.py b/pipelex/pipe_controllers/parallel/pipe_parallel.py index 90d5453e6..5dcc78ff5 100644 --- a/pipelex/pipe_controllers/parallel/pipe_parallel.py +++ b/pipelex/pipe_controllers/parallel/pipe_parallel.py @@ -13,6 +13,8 @@ from pipelex.core.pipes.inputs.input_stuff_specs_factory import InputStuffSpecsFactory from pipelex.core.pipes.pipe_output import PipeOutput from pipelex.core.stuffs.stuff_factory import StuffFactory +from pipelex.graph.graph_tracer_manager import GraphTracerManager +from pipelex.graph.graphspec import IOSpec from pipelex.hub import get_required_pipe from pipelex.libraries.pipe.exceptions import PipeNotFoundError from pipelex.pipe_controllers.pipe_controller import PipeController @@ -189,6 +191,21 @@ async def _live_run_controller_pipe( name=output_name, ) + # Register parallel combine edges BEFORE register_branch_outputs, because + # register_parallel_combine snapshots the original branch producers from + # _stuff_producer_map before register_controller_output overrides them + self._register_parallel_combine_with_graph_tracer( + job_metadata=job_metadata, + combined_stuff=combined_output_stuff, + branch_stuffs=output_stuffs, + ) + + # Register branch outputs with graph tracer so DATA edges flow from PipeParallel to downstream consumers + self._register_branch_outputs_with_graph_tracer( + job_metadata=job_metadata, + output_stuffs=output_stuffs, + ) + return PipeOutput( working_memory=working_memory, pipeline_run_id=job_metadata.pipeline_run_id, @@ -261,11 +278,94 @@ async def _dry_run_controller_pipe( stuff=combined_output_stuff, name=output_name, ) + + # Register parallel combine edges BEFORE register_branch_outputs, because + # register_parallel_combine snapshots the original branch producers from + # _stuff_producer_map before register_controller_output overrides them + self._register_parallel_combine_with_graph_tracer( + job_metadata=job_metadata, + combined_stuff=combined_output_stuff, + branch_stuffs=output_stuffs, + ) + + # Register branch outputs with graph tracer so DATA edges flow from PipeParallel to downstream consumers + self._register_branch_outputs_with_graph_tracer( + job_metadata=job_metadata, + output_stuffs=output_stuffs, + ) + return PipeOutput( working_memory=working_memory, pipeline_run_id=job_metadata.pipeline_run_id, ) + def _register_branch_outputs_with_graph_tracer( + self, + job_metadata: JobMetadata, + output_stuffs: dict[str, "Stuff"], + ) -> None: + """Register branch outputs with the graph tracer. + + This re-registers each branch output's stuff_code as produced by the PipeParallel + node, overriding the sub-pipe's registration so that DATA edges flow from + PipeParallel to downstream consumers. + + Args: + job_metadata: The job metadata containing graph context. + output_stuffs: Mapping of output_name to the branch output Stuff. 
+ """ + graph_context = job_metadata.graph_context + if graph_context is None: + return + tracer_manager = GraphTracerManager.get_instance() + if tracer_manager is None or graph_context.parent_node_id is None: + return + for output_name_key, output_stuff in output_stuffs.items(): + output_spec = IOSpec( + name=output_name_key, + concept=output_stuff.concept.code, + content_type=output_stuff.content.content_type, + digest=output_stuff.stuff_code, + data=output_stuff.content.smart_dump() if graph_context.data_inclusion.stuff_json_content else None, + data_text=output_stuff.content.rendered_pretty_text() if graph_context.data_inclusion.stuff_text_content else None, + data_html=output_stuff.content.rendered_pretty_html() if graph_context.data_inclusion.stuff_html_content else None, + ) + tracer_manager.register_controller_output( + graph_id=graph_context.graph_id, + node_id=graph_context.parent_node_id, + output_spec=output_spec, + ) + + def _register_parallel_combine_with_graph_tracer( + self, + job_metadata: JobMetadata, + combined_stuff: "Stuff", + branch_stuffs: dict[str, "Stuff"], + ) -> None: + """Register parallel combine edges (branch outputs β†’ combined output). + + Creates PARALLEL_COMBINE edges showing how individual branch results + are merged into the combined output. + + Args: + job_metadata: The job metadata containing graph context. + combined_stuff: The combined output Stuff. + branch_stuffs: Mapping of output_name to the branch output Stuff. + """ + graph_context = job_metadata.graph_context + if graph_context is None: + return + tracer_manager = GraphTracerManager.get_instance() + if tracer_manager is None or graph_context.parent_node_id is None: + return + branch_stuff_codes = [stuff.stuff_code for stuff in branch_stuffs.values()] + tracer_manager.register_parallel_combine( + graph_id=graph_context.graph_id, + combined_stuff_code=combined_stuff.stuff_code, + branch_stuff_codes=branch_stuff_codes, + parallel_controller_node_id=graph_context.parent_node_id, + ) + @override async def _validate_before_run( self, job_metadata: JobMetadata, working_memory: WorkingMemory, pipe_run_params: PipeRunParams, output_name: str | None = None diff --git a/pipelex/pipe_controllers/parallel/pipe_parallel_blueprint.py b/pipelex/pipe_controllers/parallel/pipe_parallel_blueprint.py index 576277c96..a1c6f6886 100644 --- a/pipelex/pipe_controllers/parallel/pipe_parallel_blueprint.py +++ b/pipelex/pipe_controllers/parallel/pipe_parallel_blueprint.py @@ -12,7 +12,7 @@ class PipeParallelBlueprint(PipeBlueprint): type: Literal["PipeParallel"] = "PipeParallel" pipe_category: Literal["PipeController"] = "PipeController" - parallels: list[SubPipeBlueprint] + branches: list[SubPipeBlueprint] add_each_output: bool = False combined_output: str | None = None @@ -20,7 +20,7 @@ class PipeParallelBlueprint(PipeBlueprint): @override def pipe_dependencies(self) -> set[str]: """Return the set of pipe codes from the parallel branches.""" - return {parallel.pipe for parallel in self.parallels} + return {branch.pipe for branch in self.branches} @field_validator("combined_output", mode="before") @classmethod diff --git a/pipelex/pipe_controllers/parallel/pipe_parallel_factory.py b/pipelex/pipe_controllers/parallel/pipe_parallel_factory.py index a1a19c8a6..4e421c5b0 100644 --- a/pipelex/pipe_controllers/parallel/pipe_parallel_factory.py +++ b/pipelex/pipe_controllers/parallel/pipe_parallel_factory.py @@ -31,7 +31,7 @@ def make( blueprint: PipeParallelBlueprint, ) -> PipeParallel: parallel_sub_pipes: list[SubPipe] = 
[] - for sub_pipe_blueprint in blueprint.parallels: + for sub_pipe_blueprint in blueprint.branches: if not sub_pipe_blueprint.result: msg = f"Unexpected error in pipe '{pipe_code}': PipeParallel requires a result specified for each parallel sub pipe" raise PipeParallelFactoryError(message=msg) diff --git a/pipelex/pipe_controllers/sequence/pipe_sequence.py b/pipelex/pipe_controllers/sequence/pipe_sequence.py index ab84e51ec..5d2f7e425 100644 --- a/pipelex/pipe_controllers/sequence/pipe_sequence.py +++ b/pipelex/pipe_controllers/sequence/pipe_sequence.py @@ -10,7 +10,8 @@ from pipelex.core.pipes.inputs.input_stuff_specs_factory import InputStuffSpecsFactory from pipelex.core.pipes.pipe_output import PipeOutput from pipelex.core.pipes.variable_multiplicity import is_multiplicity_compatible -from pipelex.hub import get_concept_library, get_required_pipe +from pipelex.core.qualified_ref import QualifiedRef +from pipelex.hub import get_concept_library, get_optional_pipe, get_required_pipe from pipelex.pipe_controllers.parallel.pipe_parallel import PipeParallel from pipelex.pipe_controllers.pipe_controller import PipeController from pipelex.pipe_controllers.sequence.exceptions import PipeSequenceValueError @@ -54,7 +55,11 @@ def validate_output_with_library(self): The output of the pipe sequence should match the output of the last step, both in terms of concept compatibility and multiplicity. """ - last_step_pipe = get_required_pipe(pipe_code=self.sequential_sub_pipes[-1].pipe_code) + last_step_pipe_code = self.sequential_sub_pipes[-1].pipe_code + # Skip output validation if the last step is an unresolved cross-package ref + if QualifiedRef.has_cross_package_prefix(last_step_pipe_code) and get_optional_pipe(pipe_code=last_step_pipe_code) is None: + return + last_step_pipe = get_required_pipe(pipe_code=last_step_pipe_code) # Check concept compatibility if not get_concept_library().is_compatible(tested_concept=last_step_pipe.output.concept, wanted_concept=self.output.concept): @@ -113,7 +118,13 @@ def needed_inputs(self, visited_pipes: set[str] | None = None) -> InputStuffSpec generated_outputs: set[str] = set() for sequential_sub_pipe in self.sequential_sub_pipes: - sub_pipe = get_required_pipe(pipe_code=sequential_sub_pipe.pipe_code) + # Skip cross-package pipe refs that aren't loaded yet (dependency not resolved) + if QualifiedRef.has_cross_package_prefix(sequential_sub_pipe.pipe_code): + sub_pipe = get_optional_pipe(pipe_code=sequential_sub_pipe.pipe_code) + if sub_pipe is None: + continue + else: + sub_pipe = get_required_pipe(pipe_code=sequential_sub_pipe.pipe_code) # Use the centralized recursion detection sub_pipe_needed_inputs = sub_pipe.needed_inputs(visited_pipes_with_current) diff --git a/pipelex/pipe_operators/compose/construct_blueprint.py b/pipelex/pipe_operators/compose/construct_blueprint.py index f88b5024e..373f2e275 100644 --- a/pipelex/pipe_operators/compose/construct_blueprint.py +++ b/pipelex/pipe_operators/compose/construct_blueprint.py @@ -80,14 +80,14 @@ def validate_method_data_consistency(self) -> Self: raise ValueError(msg) return self - def to_plx_dict(self) -> Any: - """Convert to PLX-format dict for serialization. + def to_mthds_dict(self) -> Any: + """Convert to MTHDS-format dict for serialization. - Returns the format expected in PLX files: + Returns the format expected in MTHDS files: - FIXED: Just the value itself - FROM_VAR: { from: "path" } with optional list_to_dict_keyed_by - TEMPLATE: { template: "..." 
} - - NESTED: The nested construct's PLX dict + - NESTED: The nested construct's MTHDS dict """ match self.method: case ConstructFieldMethod.FIXED: @@ -101,7 +101,7 @@ def to_plx_dict(self) -> Any: return {"template": self.template} case ConstructFieldMethod.NESTED: if self.nested: - return self.nested.to_plx_dict() + return self.nested.to_mthds_dict() return {} @classmethod @@ -197,7 +197,7 @@ def make_from_raw(cls, raw: Any) -> ConstructFieldBlueprint: class ConstructBlueprint(BaseModel): """Blueprint for composing a StructuredContent from working memory. - Parsed from `[pipe.name.construct]` section in PLX files. + Parsed from `[pipe.name.construct]` section in MTHDS files. Attributes: fields: Dictionary mapping field names to their composition blueprints @@ -270,23 +270,23 @@ def get_required_variables(self) -> set[str]: return required - def to_plx_dict(self) -> dict[str, Any]: - """Convert to PLX-format dict (fields at root, no wrapper). + def to_mthds_dict(self) -> dict[str, Any]: + """Convert to MTHDS-format dict (fields at root, no wrapper). - Returns the format expected in PLX files where field names are at + Returns the format expected in MTHDS files where field names are at the root level, not wrapped in a 'fields' key. """ - return {field_name: field_bp.to_plx_dict() for field_name, field_bp in self.fields.items()} + return {field_name: field_bp.to_mthds_dict() for field_name, field_bp in self.fields.items()} @model_serializer(mode="wrap") def serialize_with_context(self, handler: SerializerFunctionWrapHandler, info: SerializationInfo) -> dict[str, Any]: """Serialize with format-aware context. - When context contains {"format": "plx"}, outputs PLX-format dict. + When context contains {"format": "mthds"}, outputs MTHDS-format dict. Otherwise, uses default Pydantic serialization. """ - if info.context and info.context.get("format") == "plx": - return self.to_plx_dict() + if info.context and info.context.get("format") == "mthds": + return self.to_mthds_dict() result = handler(self) return dict(result) # Ensure dict return type diff --git a/pipelex/pipe_operators/compose/pipe_compose_blueprint.py b/pipelex/pipe_operators/compose/pipe_compose_blueprint.py index 6050137b7..fb2b41e21 100644 --- a/pipelex/pipe_operators/compose/pipe_compose_blueprint.py +++ b/pipelex/pipe_operators/compose/pipe_compose_blueprint.py @@ -24,7 +24,7 @@ class PipeComposeBlueprint(PipeBlueprint): # Either template or construct must be provided, but not both # Note: The field is named 'construct_blueprint' internally to avoid conflict with Pydantic's - # BaseModel.construct() method. In PLX/TOML files, use 'construct' (via aliases). + # BaseModel.construct() method. In MTHDS/TOML files, use 'construct' (via aliases). 
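+    # (Illustration: a [pipe.my_pipe.construct] table in a bundle is parsed into the
+    # 'construct_blueprint' field below through that alias.)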
template: str | TemplateBlueprint | None = None construct_blueprint: ConstructBlueprint | None = Field(default=None, validation_alias="construct", serialization_alias="construct") diff --git a/pipelex/pipe_operators/extract/pipe_extract.py b/pipelex/pipe_operators/extract/pipe_extract.py index 0e97f2d84..e4217c2e4 100644 --- a/pipelex/pipe_operators/extract/pipe_extract.py +++ b/pipelex/pipe_operators/extract/pipe_extract.py @@ -137,7 +137,7 @@ async def _live_run_operator_pipe( extract_choice: ExtractModelChoice = self.extract_choice or get_model_deck().extract_choice_default extract_setting: ExtractSetting = get_model_deck().get_extract_setting(extract_choice=extract_choice) - # PLX-level max_page_images takes precedence if set, otherwise use ExtractSetting + # MTHDS-level max_page_images takes precedence if set, otherwise use ExtractSetting max_nb_images = self.max_page_images if self.max_page_images is not None else extract_setting.max_nb_images extract_job_params = ExtractJobParams( diff --git a/pipelex/pipe_run/dry_run.py b/pipelex/pipe_run/dry_run.py index 1c1aedcf2..ec3479e1f 100644 --- a/pipelex/pipe_run/dry_run.py +++ b/pipelex/pipe_run/dry_run.py @@ -11,6 +11,7 @@ from pipelex.core.stuffs.stuff_content import StuffContent from pipelex.core.stuffs.text_content import TextContent from pipelex.hub import get_class_registry +from pipelex.libraries.pipe.exceptions import PipeNotFoundError from pipelex.pipe_operators.compose.exceptions import PipeComposeError from pipelex.pipe_run.exceptions import PipeRunError from pipelex.pipe_run.pipe_run_params import PipeRunMode @@ -30,13 +31,22 @@ class DryRunError(PipelexError): class DryRunStatus(StrEnum): SUCCESS = "SUCCESS" FAILURE = "FAILURE" + SKIPPED = "SKIPPED" @property def is_failure(self) -> bool: match self: case DryRunStatus.FAILURE: return True + case DryRunStatus.SUCCESS | DryRunStatus.SKIPPED: + return False + + @property + def is_success(self) -> bool: + match self: case DryRunStatus.SUCCESS: + return True + case DryRunStatus.FAILURE | DryRunStatus.SKIPPED: return False @@ -56,6 +66,11 @@ async def dry_run_pipe(pipe: PipeAbstract, raise_on_failure: bool = False) -> Dr working_memory=working_memory, pipe_run_params=PipeRunParamsFactory.make_run_params(pipe_run_mode=PipeRunMode.DRY), ) + except PipeNotFoundError as not_found_error: + # Cross-package pipe dependencies may not be loaded; skip gracefully during dry-run + error_message = f"Skipped dry run for pipe '{pipe.code}': unresolved dependency: {not_found_error}" + log.verbose(error_message) + return DryRunOutput(pipe_code=pipe.code, status=DryRunStatus.SKIPPED, error_message=error_message) except (PipeStackOverflowError, ValidationError, PipeComposeError) as exc: formatted_error = format_pydantic_validation_error(exc) if isinstance(exc, ValidationError) else str(exc) if pipe.code in get_config().pipelex.dry_run_config.allowed_to_fail_pipes: @@ -99,18 +114,21 @@ async def dry_run_pipes(pipes: list[PipeAbstract], raise_on_failure: bool = True successful_pipes: list[str] = [] failed_pipes: list[str] = [] + skipped_pipes: list[str] = [] for pipe_code, dry_run_output in results.items(): match dry_run_output.status: case DryRunStatus.SUCCESS: successful_pipes.append(pipe_code) case DryRunStatus.FAILURE: failed_pipes.append(pipe_code) + case DryRunStatus.SKIPPED: + skipped_pipes.append(pipe_code) unexpected_failures = {pipe_code: results[pipe_code] for pipe_code in failed_pipes if pipe_code not in allowed_to_fail_pipes} log.verbose( f"Dry run completed: {len(successful_pipes)} 
successful, {len(failed_pipes)} failed, " - f"{len(allowed_to_fail_pipes)} allowed to fail, in {time.time() - start_time:.2f} seconds", + f"{len(skipped_pipes)} skipped, {len(allowed_to_fail_pipes)} allowed to fail, in {time.time() - start_time:.2f} seconds", ) if unexpected_failures: unexpected_failures_details = "\n".join([f"'{pipe_code}': {results[pipe_code]}" for pipe_code in unexpected_failures]) diff --git a/pipelex/pipelex.py b/pipelex/pipelex.py index de4643678..96f2541cb 100644 --- a/pipelex/pipelex.py +++ b/pipelex/pipelex.py @@ -50,7 +50,7 @@ from pipelex.reporting.reporting_protocol import ReportingNoOp, ReportingProtocol from pipelex.system.configuration.config_loader import config_manager from pipelex.system.configuration.config_root import ConfigRoot -from pipelex.system.configuration.configs import ConfigPaths, PipelexConfig +from pipelex.system.configuration.configs import PipelexConfig from pipelex.system.environment import get_pipelexpath_dirs from pipelex.system.pipelex_service.exceptions import ( GatewayTermsNotAcceptedError, @@ -96,7 +96,7 @@ def __init__( config_cls: type[ConfigRoot] | None = None, ) -> None: self.is_pipelex_service_enabled = False # Will be set during setup - self.config_dir_path = config_dir_path or ConfigPaths.DEFAULT_CONFIG_DIR_PATH + self.config_dir_path = config_dir_path or config_manager.pipelex_config_dir self.pipelex_hub = PipelexHub() set_pipelex_hub(self.pipelex_hub) diff --git a/pipelex/pipelex.toml b/pipelex/pipelex.toml index 0254b537d..2ccc3d7e6 100644 --- a/pipelex/pipelex.toml +++ b/pipelex/pipelex.toml @@ -31,19 +31,19 @@ observer_dir = "results/observer" [pipelex.scan_config] excluded_dirs = [ - ".venv", - "venv", - "env", - ".env", - "virtualenv", - ".virtualenv", - ".git", - "__pycache__", - ".pytest_cache", - ".mypy_cache", - ".ruff_cache", - "node_modules", - "results", + ".venv", + "venv", + "env", + ".env", + "virtualenv", + ".virtualenv", + ".git", + "__pycache__", + ".pytest_cache", + ".mypy_cache", + ".ruff_cache", + "node_modules", + "results", ] [pipelex.builder_config] @@ -205,7 +205,7 @@ max = "reasoning" max_retries = 3 [cogt.llm_config.effort_to_budget_maps.anthropic] -none = 0 # Required by validator; unreachable at runtime (level map gates NONE as disabled before budget lookup) +none = 0 # Required by validator; unreachable at runtime (level map gates NONE as disabled before budget lookup) minimal = 512 low = 1024 medium = 5000 @@ -213,7 +213,7 @@ high = 16384 max = 65536 [cogt.llm_config.effort_to_budget_maps.gemini] -none = 0 # Required by validator; unreachable at runtime (level map gates NONE as disabled before budget lookup) +none = 0 # Required by validator; unreachable at runtime (level map gates NONE as disabled before budget lookup) minimal = 512 low = 1024 medium = 5000 @@ -382,32 +382,40 @@ text_gen_truncate_length = 256 nb_list_items = 3 nb_extract_pages = 4 allowed_to_fail_pipes = [ - "infinite_loop_1", # Loop but only for testing purposes - "pipe_builder", # Still not fully proofed + "infinite_loop_1", # Loop but only for testing purposes + "pipe_builder", # Still not fully proofed ] image_urls = [ - "https://storage.googleapis.com/public_test_files_7fa6_4277_9ab/fashion/fashion_photo_1.jpg", - "https://storage.googleapis.com/public_test_files_7fa6_4277_9ab/fashion/fashion_photo_2.png", + "https://storage.googleapis.com/public_test_files_7fa6_4277_9ab/fashion/fashion_photo_1.jpg", + "https://storage.googleapis.com/public_test_files_7fa6_4277_9ab/fashion/fashion_photo_2.png", ] 
#################################################################################################### -# PLX config +# MTHDS config #################################################################################################### -[pipelex.plx_config.inline_tables] +[pipelex.mthds_config.inline_tables] spaces_inside_curly_braces = true -[pipelex.plx_config.strings] +[pipelex.mthds_config.strings] prefer_literal = false force_multiline = false length_limit_to_multiline = 100 ensure_trailing_newline = true ensure_leading_blank_line = true -[pipelex.plx_config.concepts] -structure_field_ordering = ["type", "concept_ref", "item_type", "item_concept_ref", "description", "choices", "required"] +[pipelex.mthds_config.concepts] +structure_field_ordering = [ + "type", + "concept_ref", + "item_type", + "item_concept_ref", + "description", + "choices", + "required", +] -[pipelex.plx_config.pipes] +[pipelex.mthds_config.pipes] field_ordering = ["type", "description", "inputs", "output"] #################################################################################################### @@ -423,7 +431,7 @@ llm_handle = "model" llm = "model" llm_to_structure = "model_to_structure" -[migration.migration_maps.plx] +[migration.migration_maps.mthds] img_gen = "model" ocr = "model" llm_handle = "model" @@ -432,6 +440,7 @@ llm_to_structure = "model_to_structure" llm_skill = "llm_talent" img_gen_skill = "img_gen_talent" extract_skill = "extract_talent" +parallels = "branches" #################################################################################################### diff --git a/pipelex/pipeline/execute.py b/pipelex/pipeline/execute.py deleted file mode 100644 index 04895567c..000000000 --- a/pipelex/pipeline/execute.py +++ /dev/null @@ -1,203 +0,0 @@ -from pathlib import Path -from typing import TYPE_CHECKING, Any - -from pydantic import ValidationError - -from pipelex.base_exceptions import PipelexError -from pipelex.client.protocol import PipelineInputs -from pipelex.config import get_config -from pipelex.core.memory.working_memory import WorkingMemory -from pipelex.core.pipes.pipe_output import PipeOutput -from pipelex.graph.graph_tracer_manager import GraphTracerManager -from pipelex.hub import ( - get_library_manager, - get_pipe_router, - get_telemetry_manager, - teardown_current_library, -) -from pipelex.pipe_run.exceptions import PipeRouterError -from pipelex.pipe_run.pipe_run_mode import PipeRunMode -from pipelex.pipe_run.pipe_run_params import VariableMultiplicity -from pipelex.pipeline.exceptions import PipeExecutionError, PipelineExecutionError -from pipelex.pipeline.pipeline_run_setup import pipeline_run_setup -from pipelex.system.configuration.configs import PipelineExecutionConfig -from pipelex.system.telemetry.events import EventName, EventProperty, Outcome -from pipelex.tools.typing.pydantic_utils import format_pydantic_validation_error - -if TYPE_CHECKING: - from pipelex.pipe_run.pipe_job import PipeJob - - -async def execute_pipeline( - user_id: str | None = None, - library_id: str | None = None, - library_dirs: list[str] | None = None, - pipe_code: str | None = None, - plx_content: str | None = None, - bundle_uri: str | None = None, - inputs: PipelineInputs | WorkingMemory | None = None, - output_name: str | None = None, - output_multiplicity: VariableMultiplicity | None = None, - dynamic_output_concept_code: str | None = None, - pipe_run_mode: PipeRunMode | None = None, - search_domain_codes: list[str] | None = None, - execution_config: PipelineExecutionConfig | None = None, -) 
-> PipeOutput: - """Execute a pipeline and wait for its completion. - - This function executes a pipe and returns its output. Unlike ``start_pipeline``, - this function waits for the pipe execution to complete before returning. - - Parameters - ---------- - library_id: - Unique identifier for the library instance. If not provided, defaults to the - auto-generated ``pipeline_run_id``. Use a custom ID when you need to manage - multiple library instances or maintain library state across executions. - library_dirs: - List of directory paths to load pipe definitions from. Combined with directories - from the ``PIPELEXPATH`` environment variable (PIPELEXPATH directories are searched - first). When provided alongside ``plx_content``, definitions from both sources - are loaded into the library. - pipe_code: - Code identifying the pipe to execute. Required when ``plx_content`` is not - provided. When both ``plx_content`` and ``pipe_code`` are provided, the - specified pipe from the PLX content will be executed (overriding any - ``main_pipe`` defined in the plx_content). - plx_content: - Complete PLX file content as a string. The pipe to execute is determined by - ``pipe_code`` (if provided) or the ``main_pipe`` property in the PLX content. - Can be combined with ``library_dirs`` to load additional definitions. - bundle_uri: - URI identifying the bundle. If ``plx_content`` is not provided and ``bundle_uri`` - points to a local file path, the content will be read from that file. Also used - to detect if the bundle was already loaded from library directories (e.g., via - PIPELEXPATH) to avoid duplicate domain registration. - inputs: - Inputs passed to the pipeline. Can be either a ``PipelineInputs`` dictionary - or a ``WorkingMemory`` instance. - output_name: - Name of the output slot to write to. - output_multiplicity: - Output multiplicity specification. - dynamic_output_concept_code: - Override the dynamic output concept code. - pipe_run_mode: - Pipe run mode: ``PipeRunMode.LIVE`` or ``PipeRunMode.DRY``. If not specified, - inferred from the environment variable ``PIPELEX_FORCE_DRY_RUN_MODE``. Defaults - to ``PipeRunMode.LIVE`` if the environment variable is not set. - search_domain_codes: - List of domain codes to search for pipes. The executed pipe's domain is automatically - added if not already present. - user_id: - Unique identifier for the user. - execution_config: - Pipeline execution configuration including graph tracing settings. - If provided, uses this config directly. If None, uses the default from - ``get_config().pipelex.pipeline_execution_config``. Use the ``mock_inputs`` - field to generate mock data for missing required inputs during dry-run. - - Returns: - ------- - PipeOutput - The pipe output from the execution. If ``generate_graph`` was True, the - execution graph is available in ``pipe_output.graph_spec``. 
- - """ - # Use provided config or get default - execution_config = execution_config or get_config().pipelex.pipeline_execution_config - - # If plx_content is not provided but bundle_uri points to a file, read it - if plx_content is None and bundle_uri is not None: - bundle_path = Path(bundle_uri) - if bundle_path.is_file(): - plx_content = bundle_path.read_text(encoding="utf-8") - - properties: dict[EventProperty, Any] - graph_spec_result = None - # These variables are set in pipeline_run_setup and needed in finally/except blocks - pipeline_run_id: str | None = None - library_id_resolved: str | None = None - pipe_job: PipeJob | None = None - try: - pipe_job, pipeline_run_id, library_id_resolved = await pipeline_run_setup( - execution_config=execution_config, - library_id=library_id, - library_dirs=library_dirs, - pipe_code=pipe_code, - plx_content=plx_content, - bundle_uri=bundle_uri, - inputs=inputs, - output_name=output_name, - output_multiplicity=output_multiplicity, - dynamic_output_concept_code=dynamic_output_concept_code, - pipe_run_mode=pipe_run_mode, - search_domain_codes=search_domain_codes, - user_id=user_id, - ) - pipe_output = await get_pipe_router().run(pipe_job) - except PipeRouterError as exc: - # PipeRouterError can only be raised by get_pipe_router().run(), so pipe_job is guaranteed to exist - assert pipe_job is not None # for type checker - properties = { - EventProperty.PIPELINE_RUN_ID: pipeline_run_id, - EventProperty.PIPE_TYPE: pipe_job.pipe.pipe_type, - EventProperty.PIPELINE_OUTCOME: Outcome.FAILURE, - } - get_telemetry_manager().track_event(event_name=EventName.PIPELINE_COMPLETE, properties=properties) - raise PipelineExecutionError( - message=exc.message, - run_mode=pipe_job.pipe_run_params.run_mode, - pipe_code=pipe_job.pipe.code, - output_name=pipe_job.output_name, - pipe_stack=pipe_job.pipe_run_params.pipe_stack, - ) from exc - except PipelexError as exc: - # Catch other Pipelex errors that bypass the router's PipeRunError handling - # (e.g., PipeRunInputsError raised directly from pipe_abstract.py) - # If pipe_job is None, the error occurred during pipeline_run_setup before pipe_job was created - if pipe_job is None: - raise - properties = { - EventProperty.PIPELINE_RUN_ID: pipeline_run_id, - EventProperty.PIPE_TYPE: pipe_job.pipe.pipe_type, - EventProperty.PIPELINE_OUTCOME: Outcome.FAILURE, - } - get_telemetry_manager().track_event(event_name=EventName.PIPELINE_COMPLETE, properties=properties) - raise PipelineExecutionError( - message=exc.message, - run_mode=pipe_job.pipe_run_params.run_mode, - pipe_code=pipe_job.pipe.code, - output_name=pipe_job.output_name, - pipe_stack=pipe_job.pipe_run_params.pipe_stack, - ) from exc - except ValidationError as exc: - formatted_error = format_pydantic_validation_error(exc) - model_name = exc.title - msg = f"Input validation failed for '{model_name}': {formatted_error}" - raise PipeExecutionError(message=msg) from exc - finally: - # Close graph tracer if it was opened (capture graph even on failure) - # pipeline_run_id may be None if pipeline_run_setup failed early - if execution_config.is_generate_graph and pipeline_run_id is not None: - tracer_manager = GraphTracerManager.get_instance() - if tracer_manager is not None: - graph_spec_result = tracer_manager.close_tracer(pipeline_run_id) - - # Only teardown library if it was successfully created - if library_id_resolved is not None: - library = get_library_manager().get_library(library_id=library_id_resolved) - library.teardown() - teardown_current_library() - - # Assign 
graph spec to output (only reached on success, when pipe_output is bound) - if graph_spec_result is not None: - pipe_output.graph_spec = graph_spec_result - - properties = { - EventProperty.PIPELINE_RUN_ID: pipeline_run_id, - EventProperty.PIPE_TYPE: pipe_job.pipe.pipe_type, - EventProperty.PIPELINE_OUTCOME: Outcome.SUCCESS, - } - get_telemetry_manager().track_event(event_name=EventName.PIPELINE_COMPLETE, properties=properties) - return pipe_output diff --git a/pipelex/pipeline/pipeline_response.py b/pipelex/pipeline/pipeline_response.py new file mode 100644 index 000000000..c2e47b8da --- /dev/null +++ b/pipelex/pipeline/pipeline_response.py @@ -0,0 +1,29 @@ +from __future__ import annotations + +from mthds.pipeline import MAIN_STUFF_NAME, PipelineExecuteResponse, PipelineStartResponse, PipelineState + +from pipelex.core.pipes.pipe_output import PipeOutput + + +class PipelexPipelineExecuteResponse(PipelineExecuteResponse[PipeOutput]): + @classmethod + def from_pipe_output( + cls, + pipe_output: PipeOutput, + pipeline_run_id: str = "", + created_at: str = "", + pipeline_state: PipelineState = PipelineState.COMPLETED, + finished_at: str | None = None, + ) -> PipelexPipelineExecuteResponse: + return cls( + pipeline_run_id=pipeline_run_id, + created_at=created_at, + pipeline_state=pipeline_state, + finished_at=finished_at, + pipe_output=pipe_output, + main_stuff_name=pipe_output.working_memory.aliases.get(MAIN_STUFF_NAME, MAIN_STUFF_NAME), + ) + + +class PipelexPipelineStartResponse(PipelineStartResponse[PipeOutput]): + pass diff --git a/pipelex/pipeline/pipeline_run_setup.py b/pipelex/pipeline/pipeline_run_setup.py index b5ab958a1..1f33a194a 100644 --- a/pipelex/pipeline/pipeline_run_setup.py +++ b/pipelex/pipeline/pipeline_run_setup.py @@ -1,8 +1,9 @@ from pathlib import Path from typing import TYPE_CHECKING +from mthds.models.pipeline_inputs import PipelineInputs + from pipelex import log -from pipelex.client.protocol import PipelineInputs from pipelex.core.interpreter.interpreter import PipelexInterpreter from pipelex.core.memory.working_memory import WorkingMemory from pipelex.core.memory.working_memory_factory import WorkingMemoryFactory @@ -47,7 +48,7 @@ async def pipeline_run_setup( library_id: str | None = None, library_dirs: list[str] | None = None, pipe_code: str | None = None, - plx_content: str | None = None, + mthds_content: str | None = None, bundle_uri: str | None = None, inputs: PipelineInputs | WorkingMemory | None = None, output_name: str | None = None, @@ -75,22 +76,22 @@ async def pipeline_run_setup( library_dirs: List of directory paths to load pipe definitions from. Combined with directories from the ``PIPELEXPATH`` environment variable (PIPELEXPATH directories are searched - first). When provided alongside ``plx_content``, definitions from both sources + first). When provided alongside ``mthds_content``, definitions from both sources are loaded into the library. pipe_code: - Code identifying the pipe to execute. Required when ``plx_content`` is not - provided. When both ``plx_content`` and ``pipe_code`` are provided, the - specified pipe from the PLX content will be executed (overriding any + Code identifying the pipe to execute. Required when ``mthds_content`` is not + provided. When both ``mthds_content`` and ``pipe_code`` are provided, the + specified pipe from the MTHDS content will be executed (overriding any ``main_pipe`` defined in the content). - plx_content: - Complete PLX file content as a string. 
The pipe to execute is determined by - ``pipe_code`` (if provided) or the ``main_pipe`` property in the PLX content. + mthds_content: + Complete MTHDS file content as a string. The pipe to execute is determined by + ``pipe_code`` (if provided) or the ``main_pipe`` property in the MTHDS content. Can be combined with ``library_dirs`` to load additional definitions. bundle_uri: URI identifying the bundle. Used to detect if the bundle was already loaded from library directories (e.g., via PIPELEXPATH) to avoid duplicate domain registration. If provided and the resolved absolute path is already in the - loaded PLX paths, the ``plx_content`` loading will be skipped. + loaded MTHDS paths, the ``mthds_content`` loading will be skipped. inputs: Inputs passed to the pipeline. Can be either a ``PipelineInputs`` dictionary or a ``WorkingMemory`` instance. @@ -118,8 +119,8 @@ async def pipeline_run_setup( """ user_id = user_id or OTelConstants.DEFAULT_USER_ID - if not plx_content and not pipe_code: - msg = "Either pipe_code or plx_content must be provided to the pipeline API." + if not mthds_content and not pipe_code: + msg = "Either pipe_code or mthds_content must be provided to the pipeline API." raise ValueError(msg) pipeline = get_pipeline_manager().add_new_pipeline(pipe_code=pipe_code) @@ -148,9 +149,9 @@ async def pipeline_run_setup( else: log.verbose(f"No library directories to load ({source_label})") - # Then handle plx_content or pipe_code - if plx_content: - blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(plx_content=plx_content) + # Then handle MTHDS content or pipe_code + if mthds_content: + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(mthds_content=mthds_content) blueprints_to_load = [blueprint] # Check if this bundle was already loaded from library directories @@ -159,30 +160,30 @@ async def pipeline_run_setup( try: resolved_bundle_uri = Path(bundle_uri).resolve() except (OSError, RuntimeError): - # Use str(Path(...)) to normalize the path (e.g., "./file.plx" -> "file.plx") - # to match the normalization done in library_manager._load_plx_files_into_library + # Use str(Path(...)) to normalize the path (e.g., "./file.mthds" -> "file.mthds") + # to match the normalization done in library_manager._load_mthds_files_into_library resolved_bundle_uri = Path(bundle_uri) current_library = library_manager.get_library(library_id=library_id) - bundle_already_loaded = resolved_bundle_uri in current_library.loaded_plx_paths + bundle_already_loaded = resolved_bundle_uri in current_library.loaded_mthds_paths if bundle_already_loaded: log.verbose(f"Bundle '{bundle_uri}' already loaded from library directories, skipping duplicate load") if not bundle_already_loaded: library_manager.load_from_blueprints(library_id=library_id, blueprints=blueprints_to_load) - # For now, we only support one blueprint when given a plx_content. So blueprints is of length 1. + # For now, we only support one blueprint when given MTHDS content. So blueprints is of length 1. # blueprint is already set from make_pipelex_bundle_blueprint above if pipe_code: pipe = get_required_pipe(pipe_code=pipe_code) elif blueprint.main_pipe: pipe = get_required_pipe(pipe_code=blueprint.main_pipe) else: - msg = "No pipe code or main pipe in the PLX content provided to the pipeline API." + msg = "No pipe code or main pipe in the MTHDS content provided to the pipeline API." 
raise PipeExecutionError(message=msg) elif pipe_code: pipe = get_required_pipe(pipe_code=pipe_code) else: - msg = "Either provide pipe_code or plx_content to the pipeline API. 'pipe_code' must be provided when 'plx_content' is None" + msg = "Either provide pipe_code or mthds_content to the pipeline API. 'pipe_code' must be provided when 'mthds_content' is None" raise PipeExecutionError(message=msg) pipe_code = pipe.code diff --git a/pipelex/pipeline/runner.py b/pipelex/pipeline/runner.py new file mode 100644 index 000000000..1e124df6c --- /dev/null +++ b/pipelex/pipeline/runner.py @@ -0,0 +1,240 @@ +from __future__ import annotations + +from datetime import UTC, datetime +from pathlib import Path +from typing import TYPE_CHECKING, Any, cast + +from mthds.pipeline import PipelineState +from mthds.protocol import RunnerProtocol +from pydantic import ValidationError +from typing_extensions import override + +from pipelex.base_exceptions import PipelexError +from pipelex.config import get_config +from pipelex.graph.graph_tracer_manager import GraphTracerManager +from pipelex.hub import ( + get_library_manager, + get_pipe_router, + get_telemetry_manager, + teardown_current_library, +) +from pipelex.pipe_run.exceptions import PipeRouterError +from pipelex.pipeline.exceptions import PipeExecutionError, PipelineExecutionError +from pipelex.pipeline.pipeline_response import PipelexPipelineExecuteResponse +from pipelex.pipeline.pipeline_run_setup import pipeline_run_setup +from pipelex.system.telemetry.events import EventName, EventProperty, Outcome +from pipelex.tools.typing.pydantic_utils import format_pydantic_validation_error + +if TYPE_CHECKING: + import asyncio + + from mthds.models.pipe_output import VariableMultiplicity + from mthds.models.pipeline_inputs import PipelineInputs + from mthds.models.working_memory import WorkingMemoryAbstract + from mthds.pipeline import PipelineStartResponse + + from pipelex.core.memory.working_memory import WorkingMemory + from pipelex.core.pipes.pipe_output import PipeOutput + from pipelex.pipe_run.pipe_job import PipeJob + from pipelex.pipe_run.pipe_run_mode import PipeRunMode + from pipelex.system.configuration.configs import PipelineExecutionConfig + + +class PipelexRunner(RunnerProtocol["PipeOutput"]): + """Pipelex implementation of the mthds RunnerProtocol. + + Adapts pipelex pipeline execution to the mthds protocol interface. + Pipelex-specific configuration (library directories, run mode, etc.) + is provided at construction time. 
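+
+    Usage sketch (illustrative values):
+        runner = PipelexRunner(library_dirs=["./pipelines"])
+        response = await runner.execute_pipeline(pipe_code="my_pipe")
+        pipe_output = response.pipe_output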
+ """ + + def __init__( + self, + library_id: str | None = None, + library_dirs: list[str] | None = None, + bundle_uri: str | None = None, + pipe_run_mode: PipeRunMode | None = None, + search_domain_codes: list[str] | None = None, + user_id: str | None = None, + execution_config: PipelineExecutionConfig | None = None, + ): + self.library_id = library_id + self.library_dirs = library_dirs + self.bundle_uri = bundle_uri + self.pipe_run_mode = pipe_run_mode + self.search_domain_codes = search_domain_codes + self.user_id = user_id + self.execution_config = execution_config + self._running_tasks: dict[str, asyncio.Task[PipeOutput]] = {} + + @override + async def execute_pipeline( + self, + pipe_code: str | None = None, + mthds_content: str | None = None, + inputs: PipelineInputs | WorkingMemoryAbstract[Any] | None = None, + output_name: str | None = None, + output_multiplicity: VariableMultiplicity | None = None, + dynamic_output_concept_code: str | None = None, + ) -> PipelexPipelineExecuteResponse: + """Execute a pipeline and wait for its completion. + + This method executes a pipe and returns its output. Unlike ``start_pipeline``, + this method waits for the pipe execution to complete before returning. + + Pipelex-specific configuration (library directories, run mode, etc.) is provided + at construction time via the ``PipelexRunner`` constructor. + + Parameters + ---------- + pipe_code: + Code identifying the pipe to execute. Required when ``mthds_content`` is not + provided. When both ``mthds_content`` and ``pipe_code`` are provided, the + specified pipe from the PLX content will be executed (overriding any + ``main_pipe`` defined in the content). + mthds_content: + Complete PLX file content as a string. The pipe to execute is determined by + ``pipe_code`` (if provided) or the ``main_pipe`` property in the PLX content. + Can be combined with ``library_dirs`` to load additional definitions. + inputs: + Inputs passed to the pipeline. Can be either a ``PipelineInputs`` dictionary + or a ``WorkingMemory`` instance. + output_name: + Name of the output slot to write to. + output_multiplicity: + Output multiplicity specification. + dynamic_output_concept_code: + Override the dynamic output concept code. + + Returns: + ------- + PipelexPipelineExecuteResponse + The pipeline execution response wrapping the pipe output, including + pipeline run ID, timestamps, and pipeline state. If ``generate_graph`` + was True, the execution graph is available in the pipe output's + ``graph_spec``. 
+ + """ + created_at = datetime.now(UTC).isoformat() + + # Use provided config or get default + execution_config = self.execution_config or get_config().pipelex.pipeline_execution_config + + # If plx_content is not provided but bundle_uri points to a file, read it + plx_content = mthds_content + if plx_content is None and self.bundle_uri is not None: + bundle_path = Path(self.bundle_uri) + if bundle_path.is_file(): + plx_content = bundle_path.read_text(encoding="utf-8") + + # Cast inputs: the protocol accepts WorkingMemoryAbstract but pipelex expects WorkingMemory + pipelex_inputs: PipelineInputs | WorkingMemory | None = cast("PipelineInputs | WorkingMemory | None", inputs) + + properties: dict[EventProperty, Any] + graph_spec_result = None + # These variables are set in pipeline_run_setup and needed in finally/except blocks + pipeline_run_id: str | None = None + library_id_resolved: str | None = None + pipe_job: PipeJob | None = None + try: + pipe_job, pipeline_run_id, library_id_resolved = await pipeline_run_setup( + execution_config=execution_config, + library_id=self.library_id, + library_dirs=self.library_dirs, + pipe_code=pipe_code, + mthds_content=mthds_content, + bundle_uri=self.bundle_uri, + inputs=pipelex_inputs, + output_name=output_name, + output_multiplicity=output_multiplicity, + dynamic_output_concept_code=dynamic_output_concept_code, + pipe_run_mode=self.pipe_run_mode, + search_domain_codes=self.search_domain_codes, + user_id=self.user_id, + ) + pipe_output = await get_pipe_router().run(pipe_job) + except PipeRouterError as exc: + # PipeRouterError can only be raised by get_pipe_router().run(), so pipe_job is guaranteed to exist + assert pipe_job is not None # for type checker + properties = { + EventProperty.PIPELINE_RUN_ID: pipeline_run_id, + EventProperty.PIPE_TYPE: pipe_job.pipe.pipe_type, + EventProperty.PIPELINE_OUTCOME: Outcome.FAILURE, + } + get_telemetry_manager().track_event(event_name=EventName.PIPELINE_COMPLETE, properties=properties) + raise PipelineExecutionError( + message=exc.message, + run_mode=pipe_job.pipe_run_params.run_mode, + pipe_code=pipe_job.pipe.code, + output_name=pipe_job.output_name, + pipe_stack=pipe_job.pipe_run_params.pipe_stack, + ) from exc + except PipelexError as exc: + # Catch other Pipelex errors that bypass the router's PipeRunError handling + # (e.g., PipeRunInputsError raised directly from pipe_abstract.py) + # If pipe_job is None, the error occurred during pipeline_run_setup before pipe_job was created + if pipe_job is None: + raise + properties = { + EventProperty.PIPELINE_RUN_ID: pipeline_run_id, + EventProperty.PIPE_TYPE: pipe_job.pipe.pipe_type, + EventProperty.PIPELINE_OUTCOME: Outcome.FAILURE, + } + get_telemetry_manager().track_event(event_name=EventName.PIPELINE_COMPLETE, properties=properties) + raise PipelineExecutionError( + message=exc.message, + run_mode=pipe_job.pipe_run_params.run_mode, + pipe_code=pipe_job.pipe.code, + output_name=pipe_job.output_name, + pipe_stack=pipe_job.pipe_run_params.pipe_stack, + ) from exc + except ValidationError as exc: + formatted_error = format_pydantic_validation_error(exc) + model_name = exc.title + msg = f"Input validation failed for '{model_name}': {formatted_error}" + raise PipeExecutionError(message=msg) from exc + finally: + # Close graph tracer if it was opened (capture graph even on failure) + # pipeline_run_id may be None if pipeline_run_setup failed early + if execution_config.is_generate_graph and pipeline_run_id is not None: + tracer_manager = 
GraphTracerManager.get_instance() + if tracer_manager is not None: + graph_spec_result = tracer_manager.close_tracer(pipeline_run_id) + + # Only teardown library if it was successfully created + if library_id_resolved is not None: + library = get_library_manager().get_library(library_id=library_id_resolved) + library.teardown() + teardown_current_library() + + # Assign graph spec to output (only reached on success, when pipe_output is bound) + if graph_spec_result is not None: + pipe_output.graph_spec = graph_spec_result + + properties = { + EventProperty.PIPELINE_RUN_ID: pipeline_run_id, + EventProperty.PIPE_TYPE: pipe_job.pipe.pipe_type, + EventProperty.PIPELINE_OUTCOME: Outcome.SUCCESS, + } + get_telemetry_manager().track_event(event_name=EventName.PIPELINE_COMPLETE, properties=properties) + + finished_at = datetime.now(UTC).isoformat() + return PipelexPipelineExecuteResponse.from_pipe_output( + pipe_output=pipe_output, + pipeline_run_id=pipe_output.pipeline_run_id, + created_at=created_at, + pipeline_state=PipelineState.COMPLETED, + finished_at=finished_at, + ) + + @override + async def start_pipeline( + self, + pipe_code: str | None = None, + mthds_content: str | None = None, + inputs: PipelineInputs | WorkingMemoryAbstract[Any] | None = None, + output_name: str | None = None, + output_multiplicity: VariableMultiplicity | None = None, + dynamic_output_concept_code: str | None = None, + ) -> PipelineStartResponse[PipeOutput]: + raise NotImplementedError diff --git a/pipelex/pipeline/start.py b/pipelex/pipeline/start.py deleted file mode 100644 index f21de865c..000000000 --- a/pipelex/pipeline/start.py +++ /dev/null @@ -1,124 +0,0 @@ -import asyncio -from pathlib import Path - -from pipelex.client.protocol import PipelineInputs -from pipelex.config import get_config -from pipelex.core.memory.working_memory import WorkingMemory -from pipelex.core.pipes.pipe_output import PipeOutput -from pipelex.hub import get_pipe_router -from pipelex.pipe_run.pipe_run_mode import PipeRunMode -from pipelex.pipe_run.pipe_run_params import VariableMultiplicity -from pipelex.pipeline.pipeline_run_setup import pipeline_run_setup -from pipelex.system.configuration.configs import PipelineExecutionConfig - - -async def start_pipeline( - library_id: str | None = None, - library_dirs: list[str] | None = None, - pipe_code: str | None = None, - plx_content: str | None = None, - bundle_uri: str | None = None, - inputs: PipelineInputs | WorkingMemory | None = None, - output_name: str | None = None, - output_multiplicity: VariableMultiplicity | None = None, - dynamic_output_concept_code: str | None = None, - pipe_run_mode: PipeRunMode | None = None, - search_domain_codes: list[str] | None = None, - user_id: str | None = None, - execution_config: PipelineExecutionConfig | None = None, -) -> tuple[str, asyncio.Task[PipeOutput]]: - """Start a pipeline in the background. - - This function mirrors ``execute_pipeline`` but returns immediately with the - ``pipeline_run_id`` and a task instead of waiting for the pipe run to complete. - The actual execution is scheduled on the current event loop using - ``asyncio.create_task``. - - Parameters - ---------- - library_id: - Unique identifier for the library instance. If not provided, defaults to the - auto-generated ``pipeline_run_id``. Use a custom ID when you need to manage - multiple library instances or maintain library state across executions. - library_dirs: - List of directory paths to load pipe definitions from. 
Combined with directories - from the ``PIPELEXPATH`` environment variable (PIPELEXPATH directories are searched - first). When provided alongside ``plx_content``, definitions from both sources - are loaded into the library. - pipe_code: - Code identifying the pipe to execute. Required when ``plx_content`` is not - provided. When both ``plx_content`` and ``pipe_code`` are provided, the - specified pipe from the PLX content will be executed (overriding any - ``main_pipe`` defined in the content). - plx_content: - Complete PLX file content as a string. The pipe to execute is determined by - ``pipe_code`` (if provided) or the ``main_pipe`` property in the PLX content. - Can be combined with ``library_dirs`` to load additional definitions. - bundle_uri: - URI identifying the bundle. If ``plx_content`` is not provided and ``bundle_uri`` - points to a local file path, the content will be read from that file. Also used - to detect if the bundle was already loaded from library directories (e.g., via - PIPELEXPATH) to avoid duplicate domain registration. - inputs: - Inputs passed to the pipeline. Can be either a ``PipelineInputs`` dictionary - or a ``WorkingMemory`` instance. - output_name: - Name of the output slot to write to. - output_multiplicity: - Output multiplicity specification. - dynamic_output_concept_code: - Override the dynamic output concept code. - pipe_run_mode: - Pipe run mode: ``PipeRunMode.LIVE`` or ``PipeRunMode.DRY``. If not specified, - inferred from the environment variable ``PIPELEX_FORCE_DRY_RUN_MODE``. Defaults - to ``PipeRunMode.LIVE`` if the environment variable is not set. - search_domain_codes: - List of domain codes to search for pipes. The executed pipe's domain is automatically - added if not already present. - user_id: - Unique identifier for the user. - execution_config: - Pipeline execution configuration including graph tracing settings. - If not provided, uses the default from - ``get_config().pipelex.pipeline_execution_config``. Use the ``mock_inputs`` - field to generate mock data for missing required inputs during dry-run. - Since this function returns immediately, the caller is responsible for calling - ``GraphTracerManager.get_instance().close_tracer(pipeline_run_id)`` - after the task completes to retrieve the GraphSpec. - - Returns: - ------- - tuple[str, asyncio.Task[PipeOutput]] - The ``pipeline_run_id`` of the newly started pipeline and a task that - can be awaited to get the pipe output. 
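As a concrete illustration of the contract documented above, here is a minimal sketch of driving this background API as it existed before this patch removed it: start_pipeline and close_tracer are the documented calls, while the GraphTracerManager import path and the driver code around them are assumptions for the example.

import asyncio

from pipelex.graph.graph_tracer_manager import GraphTracerManager  # assumed import path
from pipelex.pipeline.start import start_pipeline  # module removed by this patch


async def fire_and_collect() -> None:
    # Returns immediately: the run is scheduled on the current event loop.
    pipeline_run_id, task = await start_pipeline(pipe_code="my_pipe", inputs={"text": "hello"})
    pipe_output = await task  # resolve the background run when convenient
    # Per the docstring, retrieving the GraphSpec after completion is on the caller.
    tracer_manager = GraphTracerManager.get_instance()
    graph_spec = tracer_manager.close_tracer(pipeline_run_id) if tracer_manager is not None else None
    print(pipeline_run_id, pipe_output, graph_spec)


asyncio.run(fire_and_collect())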
- - """ - # Use provided config or get default - execution_config = execution_config or get_config().pipelex.pipeline_execution_config - - # If plx_content is not provided but bundle_uri points to a file, read it - if plx_content is None and bundle_uri is not None: - bundle_path = Path(bundle_uri) - if bundle_path.is_file(): - plx_content = bundle_path.read_text(encoding="utf-8") - - # TODO: make sure we close the graph tracer after the task completes - pipe_job, pipeline_run_id, _library_id = await pipeline_run_setup( - execution_config=execution_config, - library_id=library_id, - library_dirs=library_dirs, - pipe_code=pipe_code, - plx_content=plx_content, - bundle_uri=bundle_uri, - inputs=inputs, - output_name=output_name, - output_multiplicity=output_multiplicity, - dynamic_output_concept_code=dynamic_output_concept_code, - pipe_run_mode=pipe_run_mode, - search_domain_codes=search_domain_codes, - user_id=user_id, - ) - - task: asyncio.Task[PipeOutput] = asyncio.create_task(get_pipe_router().run(pipe_job)) - - return pipeline_run_id, task diff --git a/pipelex/pipeline/validate_bundle.py b/pipelex/pipeline/validate_bundle.py index e7a068300..902b3489b 100644 --- a/pipelex/pipeline/validate_bundle.py +++ b/pipelex/pipeline/validate_bundle.py @@ -20,7 +20,7 @@ from pipelex.core.pipes.pipe_abstract import PipeAbstract from pipelex.core.validation import report_validation_error from pipelex.hub import get_library_manager, resolve_library_dirs, set_current_library -from pipelex.libraries.library_utils import get_pipelex_plx_files_from_dirs +from pipelex.libraries.library_utils import get_pipelex_mthds_files_from_dirs from pipelex.pipe_run.dry_run import DryRunError, DryRunOutput, dry_run_pipes from pipelex.pipe_run.exceptions import PipeRunError @@ -84,17 +84,17 @@ class ValidateBundleResult(BaseModel): async def validate_bundle( - plx_file_path: Path | None = None, - plx_content: str | None = None, + mthds_file_path: Path | None = None, + mthds_content: str | None = None, blueprints: list[PipelexBundleBlueprint] | None = None, library_dirs: Sequence[Path] | None = None, ) -> ValidateBundleResult: - provided_params = sum([blueprints is not None, plx_content is not None, plx_file_path is not None]) + provided_params = sum([blueprints is not None, mthds_content is not None, mthds_file_path is not None]) if provided_params == 0: - msg = "At least one of blueprints, plx_content, or plx_file_path must be provided to validate_bundle" + msg = "At least one of blueprints, mthds_content, or mthds_file_path must be provided to validate_bundle" raise ValidateBundleError(message=msg) if provided_params > 1: - msg = "Only one of blueprints, plx_content, or plx_file_path can be provided to validate_bundle, not multiple" + msg = "Only one of blueprints, mthds_content, or mthds_file_path can be provided to validate_bundle, not multiple" raise ValidateBundleError(message=msg) library_manager = get_library_manager() @@ -121,19 +121,19 @@ async def validate_bundle( dry_run_results = await dry_run_pipes(pipes=loaded_pipes, raise_on_failure=True) return ValidateBundleResult(blueprints=loaded_blueprints, pipes=loaded_pipes, dry_run_result=dry_run_results) - elif plx_content is not None: - blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(plx_content=plx_content) + elif mthds_content is not None: + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(mthds_content=mthds_content) loaded_blueprints = [blueprint] loaded_pipes = library_manager.load_from_blueprints(library_id=library_id, 
blueprints=[blueprint]) dry_run_results = await dry_run_pipes(pipes=loaded_pipes, raise_on_failure=True) return ValidateBundleResult(blueprints=loaded_blueprints, pipes=loaded_pipes, dry_run_result=dry_run_results) else: - assert plx_file_path is not None - blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=plx_file_path) + assert mthds_file_path is not None + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=mthds_file_path) loaded_blueprints = [blueprint] - if plx_file_path.resolve() not in library.loaded_plx_paths: + if mthds_file_path.resolve() not in library.loaded_mthds_paths: # File not yet loaded - load it from the blueprint loaded_pipes = library_manager.load_from_blueprints(library_id=library_id, blueprints=[blueprint]) else: @@ -163,7 +163,7 @@ async def validate_bundle( ) from pipe_error except ValidationError as validation_error: pipe_validation_errors = categorize_pipe_validation_error(validation_error=validation_error) - validation_error_msg = report_validation_error(category="plx", validation_error=validation_error) + validation_error_msg = report_validation_error(category="mthds", validation_error=validation_error) msg = f"Could not load blueprints because of: {validation_error_msg}" raise ValidateBundleError( message=msg, @@ -182,15 +182,15 @@ async def validate_bundle( async def validate_bundles_from_directory(directory: Path) -> ValidateBundleResult: - plx_files = get_pipelex_plx_files_from_dirs(dirs={directory}) + mthds_files = get_pipelex_mthds_files_from_dirs(dirs={directory}) all_blueprints: list[PipelexBundleBlueprint] = [] library_manager = get_library_manager() library_id, _ = library_manager.open_library() set_current_library(library_id=library_id) try: - for plx_file in plx_files: - blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=plx_file) + for mthds_file in mthds_files: + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=mthds_file) all_blueprints.append(blueprint) loaded_pipes = library_manager.load_libraries(library_id=library_id, library_dirs=[Path(directory)]) @@ -214,7 +214,7 @@ async def validate_bundles_from_directory(directory: Path) -> ValidateBundleResu ) from pipe_error except ValidationError as validation_error: pipe_validation_errors = categorize_pipe_validation_error(validation_error=validation_error) - validation_error_msg = report_validation_error(category="plx", validation_error=validation_error) + validation_error_msg = report_validation_error(category="mthds", validation_error=validation_error) msg = f"Could not load blueprints because of: {validation_error_msg}" raise ValidateBundleError( message=msg, @@ -234,29 +234,29 @@ async def validate_bundles_from_directory(directory: Path) -> ValidateBundleResu class LoadConceptsOnlyResult(BaseModel): - """Result of loading PLX files with concepts only (no pipes).""" + """Result of loading MTHDS files with concepts only (no pipes).""" blueprints: list[PipelexBundleBlueprint] concepts: list[Concept] def load_concepts_only( - plx_file_path: Path | None = None, - plx_content: str | None = None, + mthds_file_path: Path | None = None, + mthds_content: str | None = None, blueprints: list[PipelexBundleBlueprint] | None = None, library_dirs: Sequence[Path] | None = None, ) -> LoadConceptsOnlyResult: - """Load PLX files processing only domains and concepts, skipping pipes. + """Load MTHDS files processing only domains and concepts, skipping pipes. 
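A quick usage sketch of the renamed validate_bundle API above: exactly one of mthds_file_path, mthds_content, or blueprints may be given, otherwise the guard clauses raise ValidateBundleError. The import path for the error type is an assumption here.

from pipelex.exceptions import ValidateBundleError  # assumed import path
from pipelex.pipeline.validate_bundle import validate_bundle


async def check_bundle(content: str) -> None:
    try:
        # One source only; passing none (or more than one) raises ValidateBundleError.
        result = await validate_bundle(mthds_content=content)
    except ValidateBundleError as exc:
        print(f"validation failed: {exc}")
        return
    print(f"loaded {len(result.pipes)} pipe(s) from {len(result.blueprints)} blueprint(s)")


# Driven with e.g. asyncio.run(check_bundle('domain = "demo"')) inside an initialized runtime.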
This is a lightweight alternative to validate_bundle() that only processes domains and concepts. It does not load pipes, does not perform pipe validation, and does not run dry runs. Args: - plx_file_path: Path to a single PLX file to load (mutually exclusive with others) - plx_content: PLX content string to load (mutually exclusive with others) + mthds_file_path: Path to a single MTHDS file to load (mutually exclusive with others) + mthds_content: MTHDS content string to load (mutually exclusive with others) blueprints: Pre-parsed blueprints to load (mutually exclusive with others) - library_dirs: Optional directories containing additional PLX library files + library_dirs: Optional directories containing additional MTHDS library files Returns: LoadConceptsOnlyResult with blueprints and loaded concepts @@ -264,12 +264,12 @@ def load_concepts_only( Raises: ValidateBundleError: If loading fails due to interpreter or validation errors """ - provided_params = sum([blueprints is not None, plx_content is not None, plx_file_path is not None]) + provided_params = sum([blueprints is not None, mthds_content is not None, mthds_file_path is not None]) if provided_params == 0: - msg = "At least one of blueprints, plx_content, or plx_file_path must be provided to load_concepts_only" + msg = "At least one of blueprints, mthds_content, or mthds_file_path must be provided to load_concepts_only" raise ValidateBundleError(message=msg) if provided_params > 1: - msg = "Only one of blueprints, plx_content, or plx_file_path can be provided to load_concepts_only, not multiple" + msg = "Only one of blueprints, mthds_content, or mthds_file_path can be provided to load_concepts_only, not multiple" raise ValidateBundleError(message=msg) library_manager = get_library_manager() @@ -296,18 +296,18 @@ def load_concepts_only( loaded_concepts = library_manager.load_concepts_only_from_blueprints(library_id=library_id, blueprints=blueprints) return LoadConceptsOnlyResult(blueprints=loaded_blueprints, concepts=loaded_concepts) - elif plx_content is not None: - blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(plx_content=plx_content) + elif mthds_content is not None: + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(mthds_content=mthds_content) loaded_blueprints = [blueprint] loaded_concepts = library_manager.load_concepts_only_from_blueprints(library_id=library_id, blueprints=[blueprint]) return LoadConceptsOnlyResult(blueprints=loaded_blueprints, concepts=loaded_concepts) else: - assert plx_file_path is not None - blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=plx_file_path) + assert mthds_file_path is not None + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=mthds_file_path) loaded_blueprints = [blueprint] - if plx_file_path.resolve() not in library.loaded_plx_paths: + if mthds_file_path.resolve() not in library.loaded_mthds_paths: # File not yet loaded - load it from the blueprint loaded_concepts = library_manager.load_concepts_only_from_blueprints(library_id=library_id, blueprints=[blueprint]) else: @@ -324,7 +324,7 @@ def load_concepts_only( ) from interpreter_error except ValidationError as validation_error: pipe_validation_errors = categorize_pipe_validation_error(validation_error=validation_error) - validation_error_msg = report_validation_error(category="plx", validation_error=validation_error) + validation_error_msg = report_validation_error(category="mthds", validation_error=validation_error) msg = f"Could not load blueprints because of: 
{validation_error_msg}" raise ValidateBundleError( message=msg, @@ -333,14 +333,14 @@ def load_concepts_only( def load_concepts_only_from_directory(directory: Path) -> LoadConceptsOnlyResult: - """Load PLX files from a directory, processing only domains and concepts, skipping pipes. + """Load MTHDS files from a directory, processing only domains and concepts, skipping pipes. This is a lightweight alternative to validate_bundles_from_directory() that only processes domains and concepts. It does not load pipes, does not perform pipe validation, and does not run dry runs. Args: - directory: Directory containing PLX files to load + directory: Directory containing MTHDS files to load Returns: LoadConceptsOnlyResult with blueprints and loaded concepts @@ -348,15 +348,15 @@ def load_concepts_only_from_directory(directory: Path) -> LoadConceptsOnlyResult Raises: ValidateBundleError: If loading fails due to interpreter or validation errors """ - plx_files = get_pipelex_plx_files_from_dirs(dirs={directory}) + mthds_files = get_pipelex_mthds_files_from_dirs(dirs={directory}) all_blueprints: list[PipelexBundleBlueprint] = [] library_manager = get_library_manager() library_id, _ = library_manager.open_library() set_current_library(library_id=library_id) try: - for plx_file in plx_files: - blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=plx_file) + for mthds_file in mthds_files: + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=mthds_file) all_blueprints.append(blueprint) loaded_concepts = library_manager.load_concepts_only_from_blueprints(library_id=library_id, blueprints=all_blueprints) @@ -367,7 +367,7 @@ def load_concepts_only_from_directory(directory: Path) -> LoadConceptsOnlyResult ) from interpreter_error except ValidationError as validation_error: pipe_validation_errors = categorize_pipe_validation_error(validation_error=validation_error) - validation_error_msg = report_validation_error(category="plx", validation_error=validation_error) + validation_error_msg = report_validation_error(category="mthds", validation_error=validation_error) msg = f"Could not load blueprints because of: {validation_error_msg}" raise ValidateBundleError( message=msg, diff --git a/pipelex/system/configuration/config_check.py b/pipelex/system/configuration/config_check.py index 52ea59b80..d9a46aeb6 100644 --- a/pipelex/system/configuration/config_check.py +++ b/pipelex/system/configuration/config_check.py @@ -2,14 +2,14 @@ from pipelex.cli.commands.init.config_files import init_config from pipelex.hub import get_console -from pipelex.system.configuration.configs import ConfigPaths +from pipelex.system.configuration.config_loader import config_manager from pipelex.tools.misc.file_utils import path_exists from pipelex.urls import URLs def check_is_initialized(print_warning_if_not: bool = True) -> bool: - backends_toml_path = ConfigPaths.BACKENDS_FILE_PATH - routing_profiles_toml_path = ConfigPaths.ROUTING_PROFILES_FILE_PATH + backends_toml_path = config_manager.backends_file_path + routing_profiles_toml_path = config_manager.routing_profiles_file_path # Check critical files config_exists = init_config(reset=False, dry_run=True) == 0 diff --git a/pipelex/system/configuration/config_loader.py b/pipelex/system/configuration/config_loader.py index ebf1aba00..94490cda3 100644 --- a/pipelex/system/configuration/config_loader.py +++ b/pipelex/system/configuration/config_loader.py @@ -1,4 +1,5 @@ import os +import shutil from pathlib import Path from typing import Any @@ -8,6 
+9,14 @@ CONFIG_DIR_NAME = ".pipelex" CONFIG_NAME = "pipelex.toml" +PROJECT_ROOT_MARKERS: frozenset[str] = frozenset({".git", "pyproject.toml", "setup.py", "setup.cfg", "package.json", ".hg"}) + +INFERENCE_DIR_NAME = "inference" +BACKENDS_FILE_NAME = "backends.toml" +BACKENDS_DIR_NAME = "backends" +ROUTING_PROFILES_FILE_NAME = "routing_profiles.toml" +MODEL_DECKS_DIR_NAME = "deck" + class ConfigLoader: @property @@ -19,48 +28,172 @@ def pipelex_root_dir(self) -> str: """ return str(Path(__file__).resolve().parent.parent.parent) + @staticmethod + def find_project_root(start_dir: Path) -> Path | None: + """Walk up from start_dir looking for project root markers. + + Returns the directory containing the marker, or None if not found. + """ + current = start_dir.resolve() + while True: + for marker in PROJECT_ROOT_MARKERS: + if (current / marker).exists(): + return current + parent = current.parent + if parent == current: + return None + current = parent + + @property + def global_config_dir(self) -> str: + """Get the global config directory at ~/.pipelex.""" + return str(Path.home() / CONFIG_DIR_NAME) + + @property + def project_root(self) -> str | None: + """Get the detected project root directory, or None if no project root markers found.""" + project_root = self.find_project_root(Path.cwd()) + if project_root is None: + return None + return str(project_root) + + @property + def project_config_dir(self) -> str | None: + """Get the project config directory if it exists on disk. + + Returns the path to {project_root}/.pipelex if the project root was found + and the .pipelex directory exists there, otherwise None. + """ + project_root = self.find_project_root(Path.cwd()) + if project_root is None: + return None + project_config = project_root / CONFIG_DIR_NAME + if project_config.is_dir(): + return str(project_config) + return None + @property def pipelex_config_dir(self) -> str: - return os.path.join(os.getcwd(), CONFIG_DIR_NAME) + """Get the effective config directory (project if exists, else global). + + This preserves backwards compatibility for all current consumers. + """ + project_dir = self.project_config_dir + if project_dir is not None: + return project_dir + return self.global_config_dir + + def _resolve_inference_file(self, relative_path: str) -> str: + """Resolve an inference file path, checking project dir first, then global. + + Args: + relative_path: Path relative to the .pipelex directory (e.g. "inference/backends.toml"). + + Returns: + The resolved absolute path. 
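To make the resolution order concrete, here is a self-contained re-sketch of the marker walk and the project-before-global fallback implemented above; it mirrors the logic in this patch but is not the library API itself.

from pathlib import Path

MARKERS = {".git", "pyproject.toml", "setup.py", "setup.cfg", "package.json", ".hg"}


def find_root(start: Path) -> Path | None:
    current = start.resolve()
    while True:
        if any((current / marker).exists() for marker in MARKERS):
            return current
        if current.parent == current:  # reached the filesystem root
            return None
        current = current.parent


def resolve_config_file(relative: str) -> Path:
    # Prefer {project_root}/.pipelex/<relative> when it exists on disk,
    # otherwise fall back to the global ~/.pipelex/<relative>.
    root = find_root(Path.cwd())
    if root is not None:
        candidate = root / ".pipelex" / relative
        if candidate.exists():
            return candidate
    return Path.home() / ".pipelex" / relative


print(resolve_config_file("inference/backends.toml"))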
+ """ + project_dir = self.project_config_dir + if project_dir is not None: + candidate = os.path.join(project_dir, relative_path) + if os.path.exists(candidate): + return candidate + return os.path.join(self.global_config_dir, relative_path) + + @property + def backends_file_path(self) -> str: + """Resolve backends.toml from project dir or global dir.""" + return self._resolve_inference_file(os.path.join(INFERENCE_DIR_NAME, BACKENDS_FILE_NAME)) + + @property + def backends_dir_path(self) -> str: + """Resolve backends/ directory from project dir or global dir.""" + return self._resolve_inference_file(os.path.join(INFERENCE_DIR_NAME, BACKENDS_DIR_NAME)) + + @property + def routing_profiles_file_path(self) -> str: + """Resolve routing_profiles.toml from project dir or global dir.""" + return self._resolve_inference_file(os.path.join(INFERENCE_DIR_NAME, ROUTING_PROFILES_FILE_NAME)) + + @property + def model_decks_dir_path(self) -> str: + """Resolve model decks directory from project dir or global dir.""" + return self._resolve_inference_file(os.path.join(INFERENCE_DIR_NAME, MODEL_DECKS_DIR_NAME)) + + def ensure_global_config_exists(self) -> None: + """Create the global ~/.pipelex/ directory with kit template files if it doesn't exist.""" + global_dir = Path(self.global_config_dir) + if global_dir.is_dir(): + return + + from pipelex.kit.paths import GIT_IGNORED_CONFIG_FILES, get_kit_configs_dir # noqa: PLC0415 + + config_template_dir = str(get_kit_configs_dir()) + global_dir_str = str(global_dir) + os.makedirs(global_dir_str, exist_ok=True) + + def copy_directory_structure(src_dir: str, dst_dir: str) -> None: + """Recursively copy directory structure from kit templates.""" + for item in os.listdir(src_dir): + if item in GIT_IGNORED_CONFIG_FILES or item == ".DS_Store": + continue + src_item = os.path.join(src_dir, item) + dst_item = os.path.join(dst_dir, item) + if os.path.isdir(src_item): + os.makedirs(dst_item, exist_ok=True) + copy_directory_structure(src_item, dst_item) + else: + shutil.copy2(src_item, dst_item) + + copy_directory_structure(src_dir=config_template_dir, dst_dir=global_dir_str) def load_config(self) -> dict[str, Any]: """Load and merge configurations from pipelex and local config files. The configuration is loaded and merged in the following order: - 1. Base pipelex config (pipelex.toml) - 2. Local project config (pipelex.toml) if not in pipelex package - 3. Override configs in sequence: + 1. Base pipelex config (pipelex/pipelex.toml β€” package defaults) + 2. Global config (~/.pipelex/pipelex.toml) + 3. Project config ({project_root}/.pipelex/pipelex.toml, if found) + 4. Override configs from effective config dir in sequence: - pipelex_local.toml (local execution) - pipelex_{environment}.toml - pipelex_{run_mode}.toml - pipelex_override.toml (final override) Returns: - Dict[str, Any]: The merged configuration dictionary - + dict[str, Any]: The merged configuration dictionary """ + self.ensure_global_config_exists() + list_of_configs: list[str] = [] - # Pipelex base config + # 1. Pipelex package defaults list_of_configs.append(os.path.join(self.pipelex_root_dir, CONFIG_NAME)) - # Current project overrides - list_of_configs.append(os.path.join(self.pipelex_config_dir, CONFIG_NAME)) + # 2. Global config + list_of_configs.append(os.path.join(self.global_config_dir, CONFIG_NAME)) + + # 3. 
Project config (if different from global) + project_dir = self.project_config_dir + if project_dir is not None and project_dir != self.global_config_dir: + list_of_configs.append(os.path.join(project_dir, CONFIG_NAME)) + + # Effective config dir for overrides + effective_config_dir = self.pipelex_config_dir - # Override for local execution - list_of_configs.append(os.path.join(self.pipelex_config_dir, "pipelex_local.toml")) + # 4. Override for local execution + list_of_configs.append(os.path.join(effective_config_dir, "pipelex_local.toml")) # Override for environment - list_of_configs.append(os.path.join(self.pipelex_config_dir, f"pipelex_{runtime_manager.environment}.toml")) + list_of_configs.append(os.path.join(effective_config_dir, f"pipelex_{runtime_manager.environment}.toml")) # Override for run mode if runtime_manager.is_unit_testing: list_of_configs.append(os.path.join(os.getcwd(), "tests", f"pipelex_{runtime_manager.run_mode}.toml")) else: - list_of_configs.append(os.path.join(self.pipelex_config_dir, f"pipelex_{runtime_manager.run_mode}.toml")) + list_of_configs.append(os.path.join(effective_config_dir, f"pipelex_{runtime_manager.run_mode}.toml")) # Final override - list_of_configs.append(os.path.join(self.pipelex_config_dir, "pipelex_override.toml")) + list_of_configs.append(os.path.join(effective_config_dir, "pipelex_override.toml")) return load_toml_from_path_and_merge_with_overrides(paths=list_of_configs) diff --git a/pipelex/system/configuration/configs.py b/pipelex/system/configuration/configs.py index d4b1c5880..bb629acf9 100644 --- a/pipelex/system/configuration/configs.py +++ b/pipelex/system/configuration/configs.py @@ -6,7 +6,7 @@ from pipelex.cogt.model_backends.prompting_target import PromptingTarget from pipelex.cogt.templating.templating_style import TemplatingStyle from pipelex.graph.graph_config import GraphConfig -from pipelex.language.plx_config import PlxConfig +from pipelex.language.mthds_config import MthdsConfig from pipelex.system.configuration.config_model import ConfigModel from pipelex.system.configuration.config_root import ConfigRoot from pipelex.tools.aws.aws_config import AwsConfig @@ -16,21 +16,6 @@ class ConfigPaths: - DEFAULT_CONFIG_DIR_PATH = "./.pipelex" - INFERENCE_DIR_NAME = "inference" - INFERENCE_DIR_PATH = f"{DEFAULT_CONFIG_DIR_PATH}/{INFERENCE_DIR_NAME}" - BACKENDS_FILE_NAME = "backends.toml" - BACKENDS_FILE_PATH = f"{INFERENCE_DIR_PATH}/{BACKENDS_FILE_NAME}" - BACKENDS_DIR_NAME = "backends" - BACKENDS_DIR_PATH = f"{INFERENCE_DIR_PATH}/{BACKENDS_DIR_NAME}" - ROUTING_PROFILES_FILE_NAME = "routing_profiles.toml" - ROUTING_PROFILES_FILE_PATH = f"{INFERENCE_DIR_PATH}/{ROUTING_PROFILES_FILE_NAME}" - MODEL_DECKS_DIR_NAME = "deck" - MODEL_DECKS_DIR_PATH = f"{INFERENCE_DIR_PATH}/{MODEL_DECKS_DIR_NAME}" - BASE_DECK_FILE_NAME = "base_deck.toml" - BASE_DECK_FILE_PATH = f"{MODEL_DECKS_DIR_PATH}/{BASE_DECK_FILE_NAME}" - OVERRIDES_DECK_FILE_NAME = "overrides.toml" - OVERRIDES_DECK_FILE_PATH = f"{MODEL_DECKS_DIR_PATH}/{OVERRIDES_DECK_FILE_NAME}" # Dev-only config (not synced with kit) DEV_CONFIG_DIR_PATH = "./.pipelex-dev" @@ -184,7 +169,7 @@ class Pipelex(ConfigModel): structure_config: StructureConfig prompting_config: PromptingConfig - plx_config: PlxConfig + mthds_config: MthdsConfig dry_run_config: DryRunConfig pipe_run_config: PipeRunConfig diff --git a/pipelex/system/pipelex_service/pipelex_service_config.py b/pipelex/system/pipelex_service/pipelex_service_config.py index 7efceb098..4cbee38e4 100644 --- 
a/pipelex/system/pipelex_service/pipelex_service_config.py +++ b/pipelex/system/pipelex_service/pipelex_service_config.py @@ -4,8 +4,8 @@ from pydantic import ValidationError from pipelex.cogt.model_backends.backend import PipelexBackend +from pipelex.system.configuration.config_loader import config_manager from pipelex.system.configuration.config_model import ConfigModel -from pipelex.system.configuration.configs import ConfigPaths from pipelex.system.pipelex_service.exceptions import PipelexServiceConfigValidationError from pipelex.system.pipelex_service.pipelex_service_agreement import ( PIPELEX_SERVICE_CONFIG_FILE_NAME, @@ -48,7 +48,7 @@ def is_pipelex_gateway_enabled() -> bool: Returns: True if pipelex_gateway is enabled, False otherwise. """ - backends_toml = load_toml_from_path_if_exists(ConfigPaths.BACKENDS_FILE_PATH) + backends_toml = load_toml_from_path_if_exists(config_manager.backends_file_path) if backends_toml is None: return False diff --git a/pipelex/system/telemetry/otel_constants.py b/pipelex/system/telemetry/otel_constants.py index 4e934e16e..69c314719 100644 --- a/pipelex/system/telemetry/otel_constants.py +++ b/pipelex/system/telemetry/otel_constants.py @@ -108,7 +108,7 @@ def make_otel_gen_ai_output_type(output_type: str) -> otel_gen_ai_attributes.Gen class PipelexSpanAttr(StrEnum): - """Pipelex-specific span attribute keys for workflow tracing.""" + """Pipelex-specific span attribute keys for method tracing.""" TRACE_NAME = "pipelex.trace.name" TRACE_NAME_REDACTED = "pipelex.trace.name.redacted" diff --git a/pipelex/tools/misc/semver.py b/pipelex/tools/misc/semver.py new file mode 100644 index 000000000..ea99486a3 --- /dev/null +++ b/pipelex/tools/misc/semver.py @@ -0,0 +1,132 @@ +# pyright: reportUnknownMemberType=false, reportUnknownVariableType=false, reportUnknownParameterType=false, reportUnknownArgumentType=false +"""Thin typed wrapper around semantic_version for semver constraint evaluation. + +Provides parsing, constraint matching, and Minimum Version Selection (MVS) for +the MTHDS package dependency system. + +Note: semantic_version has no type stubs, so Pyright unknown-type checks are +disabled at file level for this wrapper module. +""" + +from semantic_version import SimpleSpec, Version # type: ignore[import-untyped] + + +class SemVerError(Exception): + """Raised for semver parse failures.""" + + +def parse_version(version_str: str) -> Version: + """Parse a version string into a semantic_version.Version. + + Strips a single leading 'v' prefix if present (common in git tags like v1.2.3). + + Args: + version_str: The version string to parse (e.g. "1.2.3" or "v1.2.3"). + + Returns: + The parsed Version object. + + Raises: + SemVerError: If the version string is not valid semver. + """ + cleaned = version_str.removeprefix("v") # drops exactly one leading "v", so a malformed tag like "vv1.2.3" still fails to parse + try: + return Version(cleaned) + except ValueError as exc: + msg = f"Invalid semver version: {version_str!r}" + raise SemVerError(msg) from exc + + +def parse_constraint(constraint_str: str) -> SimpleSpec: + """Parse a constraint string into a semantic_version.SimpleSpec. + + Args: + constraint_str: The constraint string to parse (e.g. "^1.2.3", ">=1.0.0,<2.0.0"). + + Returns: + The parsed SimpleSpec object. + + Raises: + SemVerError: If the constraint string is not valid. 
+ """ + try: + return SimpleSpec(constraint_str) + except ValueError as exc: + msg = f"Invalid semver constraint: {constraint_str!r}" + raise SemVerError(msg) from exc + + +def version_satisfies(version: Version, constraint: SimpleSpec) -> bool: + """Check whether a version satisfies a constraint. + + Args: + version: The version to check. + constraint: The constraint to check against. + + Returns: + True if the version satisfies the constraint. + """ + result: bool = constraint.match(version) + return result + + +def select_minimum_version( + available_versions: list[Version], + constraint: SimpleSpec, +) -> Version | None: + """Select the minimum version that satisfies a constraint (MVS). + + Implements Go-style Minimum Version Selection for a single dependency: + sorts versions ascending and returns the first match. + + Args: + available_versions: The list of available versions. + constraint: The constraint to satisfy. + + Returns: + The minimum matching version, or None if no version matches. + """ + for version in sorted(available_versions): + if constraint.match(version): + return version + return None + + +def select_minimum_version_for_multiple_constraints( + available_versions: list[Version], + constraints: list[SimpleSpec], +) -> Version | None: + """Select the minimum version that satisfies ALL constraints simultaneously. + + Used for transitive resolution when multiple packages depend on the same + package with different constraints. + + Args: + available_versions: The list of available versions. + constraints: The list of constraints that must all be satisfied. + + Returns: + The minimum version satisfying all constraints, or None if unsatisfiable. + """ + for version in sorted(available_versions): + if all(constraint.match(version) for constraint in constraints): + return version + return None + + +def parse_version_tag(tag: str) -> Version | None: + """Parse a git tag into a Version, returning None if not a valid semver tag. + + Handles tags like "v1.2.3" and "1.2.3", and gracefully ignores non-semver + tags like "release-20240101" or "latest". + + Args: + tag: The git tag string. + + Returns: + The parsed Version, or None if the tag is not valid semver. + """ + try: + return parse_version(tag) + except SemVerError: + return None diff --git a/pyproject.toml b/pyproject.toml index 2b8ef67c6..aa721c8a1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,53 +1,55 @@ [project] name = "pipelex" version = "0.18.0b3" -description = "The open standard for repeatable AI workflows. Write business logic, not API calls." 
+description = "Light client for executing methods" authors = [{ name = "Evotis S.A.S.", email = "oss@pipelex.com" }] maintainers = [{ name = "Pipelex staff", email = "oss@pipelex.com" }] license = "MIT" readme = "README.md" requires-python = ">=3.10,<3.15" classifiers = [ - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: 3.12", - "Programming Language :: Python :: 3.13", - "Programming Language :: Python :: 3.14", - "Operating System :: OS Independent", - "License :: OSI Approved :: MIT License", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", + "Operating System :: OS Independent", + "License :: OSI Approved :: MIT License", ] dependencies = [ - "aiofiles>=23.2.1", - "backports.strenum>=1.3.0 ; python_version < '3.11'", - "filetype>=1.2.0", - "httpx>=0.23.0,<1.0.0", - "instructor>=1.8.3,!=1.11.*,!=1.12.*", # 1.11.x caused typing errors with mypy - "jinja2>=3.1.4", - "json2html>=1.3.0", - "kajson==0.3.1", - "markdown>=3.6", - "networkx>=3.4.2", - "openai>=1.108.1", - "opentelemetry-api", - "opentelemetry-exporter-otlp-proto-http", - "opentelemetry-semantic-conventions", - "opentelemetry-sdk", - "pillow>=11.2.1", - "polyfactory>=2.21.0", - "portkey-ai>=2.1.0", - "posthog>=6.7.0", - "pypdfium2>=4.30.0,!=4.30.1,<5.0.0", - "pydantic>=2.10.6,<3.0.0", - "python-dotenv>=1.0.1", - "PyYAML>=6.0.2", - "rich>=13.8.1", - "shortuuid>=1.0.13", - "tomli>=2.3.0", - "tomlkit>=0.13.2", - "typer>=0.16.0", - "typing-extensions>=4.13.2", + "aiofiles>=23.2.1", + "backports.strenum>=1.3.0 ; python_version < '3.11'", + "filetype>=1.2.0", + "httpx>=0.23.0,<1.0.0", + "instructor>=1.8.3,!=1.11.*,!=1.12.*", # 1.11.x caused typing errors with mypy + "jinja2>=3.1.4", + "json2html>=1.3.0", + "kajson==0.3.1", + "markdown>=3.6", + "mthds>=0.0.1", + "networkx>=3.4.2", + "openai>=1.108.1", + "opentelemetry-api", + "opentelemetry-exporter-otlp-proto-http", + "opentelemetry-semantic-conventions", + "opentelemetry-sdk", + "pillow>=11.2.1", + "polyfactory>=2.21.0", + "portkey-ai>=2.1.0", + "posthog>=6.7.0", + "pypdfium2>=4.30.0,!=4.30.1,<5.0.0", + "pydantic>=2.10.6,<3.0.0", + "python-dotenv>=1.0.1", + "PyYAML>=6.0.2", + "rich>=13.8.1", + "semantic-version>=2.10.0", + "shortuuid>=1.0.13", + "tomli>=2.3.0", + "tomlkit>=0.13.2", + "typer>=0.16.0", + "typing-extensions>=4.13.2", ] [project.urls] @@ -68,31 +70,32 @@ huggingface = ["huggingface_hub>=0.23,<1.0.0"] mistralai = ["mistralai>=1.12.0"] s3 = ["boto3>=1.34.131", "aioboto3>=13.4.0"] docs = [ - "mkdocs>=1.6.1", - "mkdocs-glightbox>=0.4.0", - "mkdocs-material>=9.6.14", - "mkdocs-meta-manager>=1.1.0", - "mike>=2.1.3", + "mkdocs>=1.6.1", + "mkdocs-glightbox>=0.4.0", + "mkdocs-material>=9.6.14", + "mkdocs-meta-manager>=1.1.0", + "mike>=2.1.3", ] dev = [ - "boto3-stubs>=1.35.24", - "moto[s3]>=5.0.0", - "mypy==1.19.1", - "pyright==1.1.408", - "pylint==4.0.4", - "pytest>=9.0.2", - "pytest-asyncio>=0.24.0", - "pytest-cov>=6.1.1", - "pytest-mock>=3.14.0", - "pytest-sugar>=1.0.0", - "pytest-xdist>= 3.6.1", - "ruff==0.14.13", - "types-aioboto3[bedrock,bedrock-runtime]>=13.4.0", - "types-aiofiles>=24.1.0.20240626", - "types-markdown>=3.6.0.20240316", - "types-networkx>=3.3.0.20241020", - "types-PyYAML>=6.0.12.20250326", + "boto3-stubs>=1.35.24", + 
"moto[s3]>=5.0.0", + "mypy==1.19.1", + "pipelex-tools>=0.1.1", + "pyright==1.1.408", + "pylint==4.0.4", + "pytest>=9.0.2", + "pytest-asyncio>=0.24.0", + "pytest-cov>=6.1.1", + "pytest-mock>=3.14.0", + "pytest-sugar>=1.0.0", + "pytest-xdist>= 3.6.1", + "ruff==0.14.13", + "types-aioboto3[bedrock,bedrock-runtime]>=13.4.0", + "types-aiofiles>=24.1.0.20240626", + "types-markdown>=3.6.0.20240316", + "types-networkx>=3.3.0.20241020", + "types-PyYAML>=6.0.12.20250326", ] [project.scripts] @@ -117,11 +120,11 @@ warn_unused_configs = true [[tool.mypy.overrides]] ignore_missing_imports = true module = [ - "backports.strenum", - "filetype", - "json2html", - "pypdfium2", - "pypdfium2.raw", + "backports.strenum", + "filetype", + "json2html", + "pypdfium2", + "pypdfium2.raw", ] [tool.pyright] @@ -133,7 +136,7 @@ deprecateTypingAliases = false disableBytesTypePromotions = true enableExperimentalFeatures = false enableTypeIgnoreComments = true -extraPaths = ["./tests"] +extraPaths = ["./tests", "../mthds-python"] reportAbstractUsage = "error" reportArgumentType = "error" reportAssertAlwaysTrue = "error" @@ -222,30 +225,30 @@ typeCheckingMode = "strict" [tool.pytest] minversion = "9.0" addopts = [ - "--import-mode=importlib", - "-ra", # Show all test outcomes (including skips) - "-m", - "not (inference or llm or img_gen or extract or pipelex_api)", + "--import-mode=importlib", + "-ra", # Show all test outcomes (including skips) + "-m", + "not (inference or llm or img_gen or extract or pipelex_api)", ] asyncio_default_fixture_loop_scope = "session" xfail_strict = true filterwarnings = [ - "ignore:Support for class-based `config` is deprecated:DeprecationWarning", - "ignore:websockets.*is deprecated:DeprecationWarning", - "ignore:typing\\.io is deprecated:DeprecationWarning", - "ignore:typing\\.re is deprecated:DeprecationWarning", - "ignore:.*has been moved to cryptography.*", - "ignore:Use.*Types instead", + "ignore:Support for class-based `config` is deprecated:DeprecationWarning", + "ignore:websockets.*is deprecated:DeprecationWarning", + "ignore:typing\\.io is deprecated:DeprecationWarning", + "ignore:typing\\.re is deprecated:DeprecationWarning", + "ignore:.*has been moved to cryptography.*", + "ignore:Use.*Types instead", ] markers = [ - "inference: slow and costly due to inference calls", - "llm: slow and costly due to llm inference calls", - "img_gen: slow and costly due to image generation inference calls", - "extract: slow and costly due to doc extraction inference calls", - "gha_disabled: tests that should not run in GitHub Actions", - "codex_disabled: tests that should not run in Codex", - "dry_runnable: tests that can be run in dry-run mode", - "pipelex_api: tests that require access to the Pipelex API", + "inference: slow and costly due to inference calls", + "llm: slow and costly due to llm inference calls", + "img_gen: slow and costly due to image generation inference calls", + "extract: slow and costly due to doc extraction inference calls", + "gha_disabled: tests that should not run in GitHub Actions", + "codex_disabled: tests that should not run in Codex", + "dry_runnable: tests that can be run in dry-run mode", + "pipelex_api: tests that require access to the Pipelex API", ] [tool.coverage.run] @@ -254,24 +257,24 @@ omit = ["tests/*", "**/__init__.py"] [tool.coverage.report] exclude_lines = [ - "pragma: no cover", - "def __repr__", - "raise NotImplementedError", - "if __name__ == .__main__.:", - "pass", - "raise ImportError", + "pragma: no cover", + "def __repr__", + "raise 
NotImplementedError", + "if __name__ == .__main__.:", + "pass", + "raise ImportError", ] [tool.ruff] exclude = [ - ".cursor", - ".git", - ".github", - ".mypy_cache", - ".ruff_cache", - ".venv", - ".vscode", - "trigger_pipeline", + ".cursor", + ".git", + ".github", + ".mypy_cache", + ".ruff_cache", + ".venv", + ".vscode", + "trigger_pipeline", ] line-length = 150 target-version = "py311" @@ -282,122 +285,122 @@ target-version = "py311" preview = true select = ["ALL"] ignore = [ - "ANN201", # Missing return type annotation for public function `my_func` - "ANN202", # Missing return type annotation for private function `my_func` - "ANN204", # Missing return type annotation for special method `my_func` - "ANN206", # Missing return type annotation for classmethod `my_func` - "ANN401", # Dynamically typed expressions (typing.Any) are disallowed in `...` - "ASYNC230", # Async functions should not open files with blocking methods like `open` - "ASYNC240", # Async functions should not use pathlib.Path methods, use trio.Path or anyio.path - - "B903", # Class could be dataclass or namedtuple - - "C901", # Is to complex - "COM812", # Checks for the absence of trailing commas. - - "CPY001", # Missing copyright notice at top of file - - "D100", # Missing docstring in public module - "D101", # Missing docstring in public class - "D102", # Missing docstring in public method - "D103", # Missing docstring in public function - "D104", # Missing docstring in public package - "D105", # Missing docstring in magic method - "D107", # Missing docstring in __init__ - "D205", # 1 blank line required between summary line and description - "D400", # First line should end with a period - "D401", # First line of docstring should be in imperative mood: "My docstring...." - "D404", # First word of the docstring should not be "This" - "D415", # First line should end with a period, question mark, or exclamation point - - "DOC201", # `return` is not documented in docstring - "DOC202", # Docstring should not have a returns section because the function doesn't return anything - "DOC402", # `yield` is not documented in docstring - "DOC502", # Raised exception is not explicitly raised: `FileNotFoundError` - "DOC501", # Raised exception `ModuleFileError` missing from docstring - - "DTZ001", # `datetime.datetime()` called without a `tzinfo` argument - "DTZ005", # `datetime.datetime.now()` called without a `tz` argument - - "ERA001", # Found commented-out code - - "FBT001", # Boolean-typed positional argument in function definition - "FBT002", # Boolean default positional argument in function definition - "FBT003", #Boolean positional value in function call - - "FIX002", # Line contains TODO, consider resolving the issue - - "FURB101", # `open` and `read` should be replaced by `Path(file_path.path).read_text(encoding="utf-8")` - "FURB113", # Checks for consecutive calls to append. - "FURB152", # Checks for literals that are similar to constants in math module. - - "LOG004", # `.exception()` call outside exception handlers - - "PLC0105", # `TypeVar` name "SomethingType" does not reflect its covariance; consider renaming it to "SomethingType_co" - "PLC1901", # Checks for comparisons to empty strings. 
- - "PLR0904", # Too many public methods ( > 20) - "PLR0911", # Too many return statements (/6) - "PLR0912", # Too many branches (/12) - "PLR0913", # Too many arguments in function definition (/5) - "PLR0914", # Too many local variables ( /15) - "PLR0915", # Too many statements (/50) - "PLR0917", # Too many positional arguments ( /5) - "PLR2004", # Magic value used in comparison, consider replacing `2` with a constant variable - "PLR6301", # Too many return statements in `for` loop - "PLR1702", # Too many nested blocks ( > 5) - - "PT013", # Incorrect import of `pytest`; use `import pytest` instead - - "PTH100", # `os.path.abspath()` should be replaced by `Path.resolve()` - "PTH103", # `os.makedirs()` should be replaced by `Path.mkdir(parents=True)` - "PTH107", # `os.remove()` should be replaced by `Path.unlink()` - "PTH109", # `os.getcwd()` should be replaced by `Path.cwd()` - "PTH118", # `os.path.join()` should be replaced by `Path` with `/` operator - "PTH120", # `os.path.dirname()` should be replaced by `Path.parent` - "PTH110", # `os.path.exists()` should be replaced by `Path.exists()` - "PTH112", # `os.path.isdir()` should be replaced by `Path.is_dir()` - "PTH119", # `os.path.basename()` should be replaced by `Path.name` - "PTH123", # `open()` should be replaced by `Path.open()` - "PTH208", # Use `pathlib.Path.iterdir()` instead. - - "PYI051", # `Literal["auto"]` is redundant in a union with `str` - - "RET505", # superfluous-else-return - - "RUF001", # String contains ambiguous `′` (PRIME). Did you mean ``` (GRAVE ACCENT)? - "RUF003", # Comment contains ambiguous `’` (RIGHT SINGLE QUOTATION MARK). Did you mean ``` (GRAVE ACCENT)? - "RUF022", # Checks for __all__ definitions that are not ordered according to an "isort-style" sort. - - "SIM105", # Use `contextlib.suppress(ValueError)` instead of `try`-`except`-`pass` - "SIM108", # Use ternary operator `description = func.__doc__.strip().split("\n")[0] if func.__doc__ else func.__name__` instead of `if`-`else`-block - - "S101", # Use of `assert` detected - "S102", # Use of `exec` detected - "S106", # Possible hardcoded password assigned to argument: "secret" - "S105", # Possible hardcoded password assigned to: "child_secret" - - "S311", # Cryptographically weak pseudo-random number generator - - "TD002", # Missing author in TODO; try: `# TODO(): ...` or `# TODO @: ...` - "TD003", # Missing issue link for this TODO - - "T201", # `print` found - - # TODO: stop ignoring these rules - "BLE001", # Do not catch blind exception: `Exception` - "B027", # Checks for empty methods in abstract base classes without an abstract decorator. - "UP007", # Use `X | Y` for type annotations - "UP036", # Version block is outdated for minimum Python version - "SIM102", # Use a single `if` statement instead of nested `if` statements - "S701", # Using jinja2 templates with `autoescape=False` is dangerous and can lead to XSS. Ensure `autoescape=True` or use the `select_autoescape` function. 
- "TRY301", # Abstract `raise` to an inner function - "PERF401", # Use a list comprehension to create a transformed list - "PLW2901", # `for` loop variable `line` overwritten by assignment target - "TRY300", # Consider moving this statement to an `else` block - "UP035", # `typing.List` is deprecated, use `list` instead - "RET503", # Missing explicit `return` at the end of function able to return non-`None` value - "UP017", # Use `datetime.UTC` alias - but UTC only available in Python 3.11+ + "ANN201", # Missing return type annotation for public function `my_func` + "ANN202", # Missing return type annotation for private function `my_func` + "ANN204", # Missing return type annotation for special method `my_func` + "ANN206", # Missing return type annotation for classmethod `my_func` + "ANN401", # Dynamically typed expressions (typing.Any) are disallowed in `...` + "ASYNC230", # Async functions should not open files with blocking methods like `open` + "ASYNC240", # Async functions should not use pathlib.Path methods, use trio.Path or anyio.path + + "B903", # Class could be dataclass or namedtuple + + "C901", # Is to complex + "COM812", # Checks for the absence of trailing commas. + + "CPY001", # Missing copyright notice at top of file + + "D100", # Missing docstring in public module + "D101", # Missing docstring in public class + "D102", # Missing docstring in public method + "D103", # Missing docstring in public function + "D104", # Missing docstring in public package + "D105", # Missing docstring in magic method + "D107", # Missing docstring in __init__ + "D205", # 1 blank line required between summary line and description + "D400", # First line should end with a period + "D401", # First line of docstring should be in imperative mood: "My docstring...." + "D404", # First word of the docstring should not be "This" + "D415", # First line should end with a period, question mark, or exclamation point + + "DOC201", # `return` is not documented in docstring + "DOC202", # Docstring should not have a returns section because the function doesn't return anything + "DOC402", # `yield` is not documented in docstring + "DOC502", # Raised exception is not explicitly raised: `FileNotFoundError` + "DOC501", # Raised exception `ModuleFileError` missing from docstring + + "DTZ001", # `datetime.datetime()` called without a `tzinfo` argument + "DTZ005", # `datetime.datetime.now()` called without a `tz` argument + + "ERA001", # Found commented-out code + + "FBT001", # Boolean-typed positional argument in function definition + "FBT002", # Boolean default positional argument in function definition + "FBT003", #Boolean positional value in function call + + "FIX002", # Line contains TODO, consider resolving the issue + + "FURB101", # `open` and `read` should be replaced by `Path(file_path.path).read_text(encoding="utf-8")` + "FURB113", # Checks for consecutive calls to append. + "FURB152", # Checks for literals that are similar to constants in math module. + + "LOG004", # `.exception()` call outside exception handlers + + "PLC0105", # `TypeVar` name "SomethingType" does not reflect its covariance; consider renaming it to "SomethingType_co" + "PLC1901", # Checks for comparisons to empty strings. 
+ + "PLR0904", # Too many public methods ( > 20) + "PLR0911", # Too many return statements (/6) + "PLR0912", # Too many branches (/12) + "PLR0913", # Too many arguments in function definition (/5) + "PLR0914", # Too many local variables ( /15) + "PLR0915", # Too many statements (/50) + "PLR0917", # Too many positional arguments ( /5) + "PLR2004", # Magic value used in comparison, consider replacing `2` with a constant variable + "PLR6301", # Method could be a function, classmethod, or static method (no-self-use) + "PLR1702", # Too many nested blocks ( > 5) + + "PT013", # Incorrect import of `pytest`; use `import pytest` instead + + "PTH100", # `os.path.abspath()` should be replaced by `Path.resolve()` + "PTH103", # `os.makedirs()` should be replaced by `Path.mkdir(parents=True)` + "PTH107", # `os.remove()` should be replaced by `Path.unlink()` + "PTH109", # `os.getcwd()` should be replaced by `Path.cwd()` + "PTH118", # `os.path.join()` should be replaced by `Path` with `/` operator + "PTH120", # `os.path.dirname()` should be replaced by `Path.parent` + "PTH110", # `os.path.exists()` should be replaced by `Path.exists()` + "PTH112", # `os.path.isdir()` should be replaced by `Path.is_dir()` + "PTH119", # `os.path.basename()` should be replaced by `Path.name` + "PTH123", # `open()` should be replaced by `Path.open()` + "PTH208", # Use `pathlib.Path.iterdir()` instead. + + "PYI051", # `Literal["auto"]` is redundant in a union with `str` + + "RET505", # superfluous-else-return + + "RUF001", # String contains ambiguous `′` (PRIME). Did you mean ``` (GRAVE ACCENT)? + "RUF003", # Comment contains ambiguous `’` (RIGHT SINGLE QUOTATION MARK). Did you mean ``` (GRAVE ACCENT)? + "RUF022", # Checks for __all__ definitions that are not ordered according to an "isort-style" sort. + + "SIM105", # Use `contextlib.suppress(ValueError)` instead of `try`-`except`-`pass` + "SIM108", # Use ternary operator `description = func.__doc__.strip().split("\n")[0] if func.__doc__ else func.__name__` instead of `if`-`else`-block + + "S101", # Use of `assert` detected + "S102", # Use of `exec` detected + "S106", # Possible hardcoded password assigned to argument: "secret" + "S105", # Possible hardcoded password assigned to: "child_secret" + + "S311", # Cryptographically weak pseudo-random number generator + + "TD002", # Missing author in TODO; try: `# TODO(): ...` or `# TODO @: ...` + "TD003", # Missing issue link for this TODO + + "T201", # `print` found + + # TODO: stop ignoring these rules + "BLE001", # Do not catch blind exception: `Exception` + "B027", # Checks for empty methods in abstract base classes without an abstract decorator. + "UP007", # Use `X | Y` for type annotations + "UP036", # Version block is outdated for minimum Python version + "SIM102", # Use a single `if` statement instead of nested `if` statements + "S701", # Using jinja2 templates with `autoescape=False` is dangerous and can lead to XSS. Ensure `autoescape=True` or use the `select_autoescape` function. 
+ "TRY301", # Abstract `raise` to an inner function + "PERF401", # Use a list comprehension to create a transformed list + "PLW2901", # `for` loop variable `line` overwritten by assignment target + "TRY300", # Consider moving this statement to an `else` block + "UP035", # `typing.List` is deprecated, use `list` instead + "RET503", # Missing explicit `return` at the end of function able to return non-`None` value + "UP017", # Use `datetime.UTC` alias - but UTC only available in Python 3.11+ ] [tool.ruff.lint.pydocstyle] @@ -405,7 +408,7 @@ convention = "google" [tool.ruff.lint.per-file-ignores] "tests/**/*.py" = [ - "INP001", # Allow test files to not have __init__.py in their directories (avoids namespace collisions) + "INP001", # Allow test files to not have __init__.py in their directories (avoids namespace collisions) ] [tool.uv] @@ -430,8 +433,8 @@ reports = false [tool.pylint.messages_control] disable = ["all"] enable = [ - "W0101", # Unreachable code: Used when there is some code behind a "return" or "raise" statement, which will never be accessed. - "C0103", # invalid-name (naming convention) + "W0101", # Unreachable code: Used when there is some code behind a "return" or "raise" statement, which will never be accessed. + "C0103", # invalid-name (naming convention) ] ignore = [".venv", "__pycache__", "build", "dist", ".git"] diff --git a/tests/data/packages/analytics_dep/METHODS.toml b/tests/data/packages/analytics_dep/METHODS.toml new file mode 100644 index 000000000..7620c8b1b --- /dev/null +++ b/tests/data/packages/analytics_dep/METHODS.toml @@ -0,0 +1,7 @@ +[package] +address = "github.com/mthds/analytics-lib" +version = "1.0.0" +description = "Analytics library for cross-package collision testing" + +[exports.pkg_test_analytics_dep] +pipes = ["pkg_test_compute_analytics"] diff --git a/tests/data/packages/analytics_dep/analytics.mthds b/tests/data/packages/analytics_dep/analytics.mthds new file mode 100644 index 000000000..6a47364d0 --- /dev/null +++ b/tests/data/packages/analytics_dep/analytics.mthds @@ -0,0 +1,14 @@ +domain = "pkg_test_analytics_dep" +main_pipe = "pkg_test_compute_analytics" + +[concept.PkgTestWeightedScore] +description = "A weighted score from the analytics library (same code as scoring_dep)" + +[pipe.pkg_test_compute_analytics] +type = "PipeLLM" +description = "Compute analytics" +output = "PkgTestWeightedScore" +prompt = "Compute analytics for: {{ data }}" + +[pipe.pkg_test_compute_analytics.inputs] +data = "Text" diff --git a/tests/data/packages/consumer_package/METHODS.toml b/tests/data/packages/consumer_package/METHODS.toml new file mode 100644 index 000000000..caffba7b2 --- /dev/null +++ b/tests/data/packages/consumer_package/METHODS.toml @@ -0,0 +1,10 @@ +[package] +address = "github.com/mthds/consumer-app" +version = "1.0.0" +description = "Consumer package that depends on scoring-lib" + +[dependencies] +scoring_dep = { address = "github.com/mthds/scoring-lib", version = "2.0.0", path = "../scoring_dep" } + +[exports.pkg_test_consumer_analysis] +pipes = ["pkg_test_analyze_item"] diff --git a/tests/data/packages/consumer_package/analysis.mthds b/tests/data/packages/consumer_package/analysis.mthds new file mode 100644 index 000000000..4a7e53960 --- /dev/null +++ b/tests/data/packages/consumer_package/analysis.mthds @@ -0,0 +1,26 @@ +domain = "pkg_test_consumer_analysis" +main_pipe = "pkg_test_analyze_item" + +[concept.PkgTestAnalysisResult] +description = "Analysis result combining scoring" + +[pipe.pkg_test_analyze_item] +type = "PipeSequence" 
+description = "Analyze an item using scoring dependency" +output = "PkgTestAnalysisResult" +steps = [ + { pipe = "scoring_dep->pkg_test_scoring_dep.pkg_test_compute_score" }, + { pipe = "pkg_test_summarize" }, +] + +[pipe.pkg_test_analyze_item.inputs] +item = "Text" + +[pipe.pkg_test_summarize] +type = "PipeLLM" +description = "Summarize the analysis" +output = "PkgTestAnalysisResult" +prompt = "Summarize the analysis for: {{ item }}" + +[pipe.pkg_test_summarize.inputs] +item = "Text" diff --git a/tests/data/packages/invalid_manifests/bad_address.toml b/tests/data/packages/invalid_manifests/bad_address.toml new file mode 100644 index 000000000..2fcc316b2 --- /dev/null +++ b/tests/data/packages/invalid_manifests/bad_address.toml @@ -0,0 +1,4 @@ +[package] +address = "no-dots-or-slashes" +version = "1.0.0" +description = "Test package with invalid address" diff --git a/tests/data/packages/invalid_manifests/bad_exports_domain.toml b/tests/data/packages/invalid_manifests/bad_exports_domain.toml new file mode 100644 index 000000000..ce5ef588e --- /dev/null +++ b/tests/data/packages/invalid_manifests/bad_exports_domain.toml @@ -0,0 +1,7 @@ +[package] +address = "github.com/org/repo" +version = "1.0.0" +description = "Test package with invalid exports domain" + +[exports.InvalidDomain] +pipes = ["my_pipe"] diff --git a/tests/data/packages/invalid_manifests/bad_exports_pipe.toml b/tests/data/packages/invalid_manifests/bad_exports_pipe.toml new file mode 100644 index 000000000..da90b2ce8 --- /dev/null +++ b/tests/data/packages/invalid_manifests/bad_exports_pipe.toml @@ -0,0 +1,7 @@ +[package] +address = "github.com/org/repo" +version = "1.0.0" +description = "Test package with invalid exports pipe name" + +[exports.valid_domain] +pipes = ["InvalidPipeName"] diff --git a/tests/data/packages/invalid_manifests/bad_version.toml b/tests/data/packages/invalid_manifests/bad_version.toml new file mode 100644 index 000000000..c39e71739 --- /dev/null +++ b/tests/data/packages/invalid_manifests/bad_version.toml @@ -0,0 +1,4 @@ +[package] +address = "github.com/org/repo" +version = "not-a-version" +description = "Test package with invalid version" diff --git a/tests/data/packages/invalid_manifests/duplicate_aliases.toml b/tests/data/packages/invalid_manifests/duplicate_aliases.toml new file mode 100644 index 000000000..82891027c --- /dev/null +++ b/tests/data/packages/invalid_manifests/duplicate_aliases.toml @@ -0,0 +1,7 @@ +[package] +address = "github.com/org/repo" +version = "1.0.0" +description = "Test package with duplicate aliases" + +[dependencies] +my_dep = { address = "github.com/org/dep1", version = "1.0.0" } diff --git a/tests/data/packages/invalid_manifests/missing_required_fields.toml b/tests/data/packages/invalid_manifests/missing_required_fields.toml new file mode 100644 index 000000000..9b09112bc --- /dev/null +++ b/tests/data/packages/invalid_manifests/missing_required_fields.toml @@ -0,0 +1,2 @@ +[package] +description = "Missing address and version" diff --git a/tests/data/packages/legal_tools/METHODS.toml b/tests/data/packages/legal_tools/METHODS.toml new file mode 100644 index 000000000..fe4a79156 --- /dev/null +++ b/tests/data/packages/legal_tools/METHODS.toml @@ -0,0 +1,17 @@ +[package] +address = "github.com/pipelexlab/legal-tools" +display_name = "Legal Tools" +version = "1.0.0" +description = "Legal document analysis tools" +authors = ["PipelexLab"] +license = "MIT" +mthds_version = "0.5.0" + +[dependencies] +scoring_lib = { address = "github.com/pipelexlab/scoring-lib", version = 
"2.0.0" } + +[exports.pkg_test_legal.contracts] +pipes = ["pkg_test_extract_clause", "pkg_test_analyze_contract"] + +[exports.pkg_test_scoring] +pipes = ["pkg_test_compute_weighted_score"] diff --git a/tests/data/packages/legal_tools/legal/contracts.mthds b/tests/data/packages/legal_tools/legal/contracts.mthds new file mode 100644 index 000000000..9c21f6cf1 --- /dev/null +++ b/tests/data/packages/legal_tools/legal/contracts.mthds @@ -0,0 +1,23 @@ +domain = "pkg_test_legal.contracts" +main_pipe = "pkg_test_extract_clause" + +[concept.PkgTestContractClause] +description = "A clause extracted from a contract" + +[pipe.pkg_test_extract_clause] +type = "PipeLLM" +description = "Extract the main clause from a contract" +output = "PkgTestContractClause" +prompt = "Extract the main clause from the following contract text: {{ text }}" + +[pipe.pkg_test_extract_clause.inputs] +text = "Text" + +[pipe.pkg_test_analyze_contract] +type = "PipeLLM" +description = "Full contract analysis" +output = "PkgTestContractClause" +prompt = "Analyze the following contract: {{ text }}" + +[pipe.pkg_test_analyze_contract.inputs] +text = "Text" diff --git a/tests/data/packages/legal_tools/scoring/scoring.mthds b/tests/data/packages/legal_tools/scoring/scoring.mthds new file mode 100644 index 000000000..6a6404ff2 --- /dev/null +++ b/tests/data/packages/legal_tools/scoring/scoring.mthds @@ -0,0 +1,23 @@ +domain = "pkg_test_scoring" +main_pipe = "pkg_test_compute_weighted_score" + +[concept.PkgTestScoreResult] +description = "A weighted score result" + +[pipe.pkg_test_compute_weighted_score] +type = "PipeLLM" +description = "Compute a weighted score for an item" +output = "PkgTestScoreResult" +prompt = "Compute a weighted score for: {{ item }}" + +[pipe.pkg_test_compute_weighted_score.inputs] +item = "Text" + +[pipe.pkg_test_private_helper] +type = "PipeLLM" +description = "Helper pipe for internal scoring" +output = "Text" +prompt = "Helper pipe for internal scoring: {{ data }}" + +[pipe.pkg_test_private_helper.inputs] +data = "Text" diff --git a/tests/data/packages/minimal_package/METHODS.toml b/tests/data/packages/minimal_package/METHODS.toml new file mode 100644 index 000000000..36bf23154 --- /dev/null +++ b/tests/data/packages/minimal_package/METHODS.toml @@ -0,0 +1,4 @@ +[package] +address = "github.com/pipelexlab/minimal" +version = "0.1.0" +description = "A minimal MTHDS package" diff --git a/tests/data/packages/minimal_package/core.mthds b/tests/data/packages/minimal_package/core.mthds new file mode 100644 index 000000000..7fac6ae03 --- /dev/null +++ b/tests/data/packages/minimal_package/core.mthds @@ -0,0 +1,7 @@ +domain = "pkg_test_minimal_core" + +[pipe.pkg_test_hello] +type = "PipeLLM" +description = "Say hello" +output = "Text" +prompt = "Say hello" diff --git a/tests/data/packages/multi_dep_consumer/METHODS.toml b/tests/data/packages/multi_dep_consumer/METHODS.toml new file mode 100644 index 000000000..ee86d299c --- /dev/null +++ b/tests/data/packages/multi_dep_consumer/METHODS.toml @@ -0,0 +1,11 @@ +[package] +address = "github.com/mthds/multi-dep-app" +version = "1.0.0" +description = "Consumer depending on both scoring and analytics" + +[dependencies] +scoring_dep = { address = "github.com/mthds/scoring-lib", version = "2.0.0", path = "../scoring_dep" } +analytics_dep = { address = "github.com/mthds/analytics-lib", version = "1.0.0", path = "../analytics_dep" } + +[exports.pkg_test_multi_dep] +pipes = ["pkg_test_multi_analyze"] diff --git a/tests/data/packages/multi_dep_consumer/multi.mthds 
b/tests/data/packages/multi_dep_consumer/multi.mthds new file mode 100644 index 000000000..ad44e64d9 --- /dev/null +++ b/tests/data/packages/multi_dep_consumer/multi.mthds @@ -0,0 +1,26 @@ +domain = "pkg_test_multi_dep" +main_pipe = "pkg_test_multi_analyze" + +[concept.PkgTestMultiResult] +description = "Result combining scoring and analytics" + +[pipe.pkg_test_multi_analyze] +type = "PipeSequence" +description = "Analyze using both scoring and analytics" +output = "PkgTestMultiResult" +steps = [ + { pipe = "scoring_dep->pkg_test_scoring_dep.pkg_test_compute_score" }, + { pipe = "pkg_test_summarize_multi" }, +] + +[pipe.pkg_test_multi_analyze.inputs] +item = "Text" + +[pipe.pkg_test_summarize_multi] +type = "PipeLLM" +description = "Summarize multi-dep analysis" +output = "PkgTestMultiResult" +prompt = "Summarize: {{ item }}" + +[pipe.pkg_test_summarize_multi.inputs] +item = "Text" diff --git a/tests/data/packages/refining_consumer/METHODS.toml b/tests/data/packages/refining_consumer/METHODS.toml new file mode 100644 index 000000000..d97fc2f4b --- /dev/null +++ b/tests/data/packages/refining_consumer/METHODS.toml @@ -0,0 +1,10 @@ +[package] +address = "github.com/mthds/refining-app" +version = "1.0.0" +description = "Consumer with concept that refines a cross-package concept" + +[dependencies] +scoring_dep = { address = "github.com/mthds/scoring-lib", version = "2.0.0", path = "../scoring_dep" } + +[exports.pkg_test_refining] +pipes = ["pkg_test_refine_score"] diff --git a/tests/data/packages/refining_consumer/refining.mthds b/tests/data/packages/refining_consumer/refining.mthds new file mode 100644 index 000000000..430af145c --- /dev/null +++ b/tests/data/packages/refining_consumer/refining.mthds @@ -0,0 +1,15 @@ +domain = "pkg_test_refining" +main_pipe = "pkg_test_refine_score" + +[concept.PkgTestRefinedScore] +description = "A refined score that extends the dependency's weighted score" +refines = "scoring_dep->pkg_test_scoring_dep.PkgTestWeightedScore" + +[pipe.pkg_test_refine_score] +type = "PipeLLM" +description = "Compute a refined score" +output = "PkgTestRefinedScore" +prompt = "Refine the score for: {{ item }}" + +[pipe.pkg_test_refine_score.inputs] +item = "Text" diff --git a/tests/data/packages/scoring_dep/METHODS.toml b/tests/data/packages/scoring_dep/METHODS.toml new file mode 100644 index 000000000..c7bdaf827 --- /dev/null +++ b/tests/data/packages/scoring_dep/METHODS.toml @@ -0,0 +1,7 @@ +[package] +address = "github.com/mthds/scoring-lib" +version = "2.0.0" +description = "Scoring library for cross-package testing" + +[exports.pkg_test_scoring_dep] +pipes = ["pkg_test_compute_score"] diff --git a/tests/data/packages/scoring_dep/scoring.mthds b/tests/data/packages/scoring_dep/scoring.mthds new file mode 100644 index 000000000..6664a82c6 --- /dev/null +++ b/tests/data/packages/scoring_dep/scoring.mthds @@ -0,0 +1,23 @@ +domain = "pkg_test_scoring_dep" +main_pipe = "pkg_test_compute_score" + +[concept.PkgTestWeightedScore] +description = "A weighted score result from the scoring library" + +[pipe.pkg_test_compute_score] +type = "PipeLLM" +description = "Compute a weighted score" +output = "PkgTestWeightedScore" +prompt = "Compute a weighted score for: {{ item }}" + +[pipe.pkg_test_compute_score.inputs] +item = "Text" + +[pipe.pkg_test_internal_helper] +type = "PipeLLM" +description = "Internal helper not exported" +output = "Text" +prompt = "Internal helper: {{ data }}" + +[pipe.pkg_test_internal_helper.inputs] +data = "Text" diff --git 
a/tests/data/packages/standalone_bundle/my_pipe.mthds b/tests/data/packages/standalone_bundle/my_pipe.mthds new file mode 100644 index 000000000..0ac227e96 --- /dev/null +++ b/tests/data/packages/standalone_bundle/my_pipe.mthds @@ -0,0 +1,7 @@ +domain = "pkg_test_standalone" + +[pipe.pkg_test_do_something] +type = "PipeLLM" +description = "Do something useful" +output = "Text" +prompt = "Do something useful" diff --git a/tests/e2e/pipelex/concepts/nested_concepts/generated_models/nested_concepts_test__customer.py b/tests/e2e/pipelex/concepts/nested_concepts/generated_models/nested_concepts_test__customer.py index f113938e3..b722d64c9 100644 --- a/tests/e2e/pipelex/concepts/nested_concepts/generated_models/nested_concepts_test__customer.py +++ b/tests/e2e/pipelex/concepts/nested_concepts/generated_models/nested_concepts_test__customer.py @@ -2,7 +2,7 @@ If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered diff --git a/tests/e2e/pipelex/concepts/nested_concepts/generated_models/nested_concepts_test__invoice.py b/tests/e2e/pipelex/concepts/nested_concepts/generated_models/nested_concepts_test__invoice.py index fda04acc4..943274969 100644 --- a/tests/e2e/pipelex/concepts/nested_concepts/generated_models/nested_concepts_test__invoice.py +++ b/tests/e2e/pipelex/concepts/nested_concepts/generated_models/nested_concepts_test__invoice.py @@ -2,7 +2,7 @@ If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered diff --git a/tests/e2e/pipelex/concepts/nested_concepts/generated_models/nested_concepts_test__line_item.py b/tests/e2e/pipelex/concepts/nested_concepts/generated_models/nested_concepts_test__line_item.py index a4c1e11b9..1b1333162 100644 --- a/tests/e2e/pipelex/concepts/nested_concepts/generated_models/nested_concepts_test__line_item.py +++ b/tests/e2e/pipelex/concepts/nested_concepts/generated_models/nested_concepts_test__line_item.py @@ -2,7 +2,7 @@ If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. 
Make sure your custom class is importable and registered diff --git a/tests/e2e/pipelex/concepts/nested_concepts/nested_concepts.plx b/tests/e2e/pipelex/concepts/nested_concepts/nested_concepts.mthds similarity index 66% rename from tests/e2e/pipelex/concepts/nested_concepts/nested_concepts.plx rename to tests/e2e/pipelex/concepts/nested_concepts/nested_concepts.mthds index 3a5205e57..54552ad07 100644 --- a/tests/e2e/pipelex/concepts/nested_concepts/nested_concepts.plx +++ b/tests/e2e/pipelex/concepts/nested_concepts/nested_concepts.mthds @@ -1,6 +1,6 @@ -domain = "nested_concepts_test" +domain = "nested_concepts_test" description = "Test domain for concept-to-concept references (nested concepts)" -main_pipe = "generate_invoice" +main_pipe = "generate_invoice" # Define the LineItem concept with a structure [concept.LineItem] @@ -8,15 +8,15 @@ description = "A single line item in an invoice" [concept.LineItem.structure] product_name = { type = "text", description = "Name of the product", required = true } -quantity = { type = "integer", description = "Quantity ordered", required = true } -unit_price = { type = "number", description = "Price per unit", required = true } +quantity = { type = "integer", description = "Quantity ordered", required = true } +unit_price = { type = "number", description = "Price per unit", required = true } # Define the Customer concept with a structure [concept.Customer] description = "A customer for an invoice" [concept.Customer.structure] -name = { type = "text", description = "Customer's full name", required = true } +name = { type = "text", description = "Customer's full name", required = true } email = { type = "text", description = "Customer's email address", required = true } # Define the Invoice concept with nested concept references @@ -25,10 +25,10 @@ description = "An invoice with customer and line items" [concept.Invoice.structure] invoice_number = { type = "text", description = "Unique invoice identifier", required = true } -customer = { type = "concept", concept_ref = "nested_concepts_test.Customer", description = "The customer for this invoice", required = true } -line_items = { type = "list", item_type = "concept", item_concept_ref = "nested_concepts_test.LineItem", description = "List of line items in the invoice", required = true } -total_amount = { type = "number", description = "Total invoice amount", required = true } -notes = { type = "text", description = "Optional notes for the invoice", required = false } +customer = { type = "concept", concept_ref = "nested_concepts_test.Customer", description = "The customer for this invoice", required = true } +line_items = { type = "list", item_type = "concept", item_concept_ref = "nested_concepts_test.LineItem", description = "List of line items in the invoice", required = true } +total_amount = { type = "number", description = "Total invoice amount", required = true } +notes = { type = "text", description = "Optional notes for the invoice", required = false } # Pipe to generate an invoice from text description [pipe.generate_invoice] diff --git a/tests/e2e/pipelex/concepts/nested_concepts/test_nested_concepts_pipe.py b/tests/e2e/pipelex/concepts/nested_concepts/test_nested_concepts_pipe.py index 55be1374c..8259496bc 100644 --- a/tests/e2e/pipelex/concepts/nested_concepts/test_nested_concepts_pipe.py +++ b/tests/e2e/pipelex/concepts/nested_concepts/test_nested_concepts_pipe.py @@ -1,7 +1,7 @@ """E2E test for pipes with nested concept-to-concept references. This test verifies that: -1. 
Concepts with nested concept references can be loaded from PLX files +1. Concepts with nested concept references can be loaded from MTHDS files 2. The dependency graph correctly orders concept loading 3. Pipes can generate structured output with nested concepts 4. The generated output contains properly typed nested objects @@ -14,7 +14,7 @@ from pipelex import pretty_print from pipelex.core.stuffs.text_content import TextContent from pipelex.pipe_run.pipe_run_mode import PipeRunMode -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner from tests.e2e.pipelex.concepts.nested_concepts.generated_models.nested_concepts_test__customer import Customer from tests.e2e.pipelex.concepts.nested_concepts.generated_models.nested_concepts_test__invoice import Invoice from tests.e2e.pipelex.concepts.nested_concepts.generated_models.nested_concepts_test__line_item import LineItem @@ -31,21 +31,24 @@ async def test_invoice_with_nested_customer_and_line_items(self, pipe_run_mode: """Test that a pipe can generate an Invoice with nested Customer and LineItem concepts. This test verifies the complete flow: - 1. PLX file with concept-to-concept references is loaded + 1. MTHDS file with concept-to-concept references is loaded 2. Concepts are loaded in topological order (LineItem, Customer before Invoice) 3. The LLM generates structured output with proper nested types 4. The output can be accessed via working_memory.get_stuff_as() with typed models """ - pipe_output = await execute_pipeline( - pipe_code="generate_invoice", + runner = PipelexRunner( library_dirs=["tests/e2e/pipelex/concepts/nested_concepts"], + pipe_run_mode=pipe_run_mode, + ) + response = await runner.execute_pipeline( + pipe_code="generate_invoice", inputs={ "description_text": TextContent( text="Create an invoice for John Smith (john.smith@example.com) who ordered 3 widgets at $10 each and 2 gadgets at $25 each." ), }, - pipe_run_mode=pipe_run_mode, ) + pipe_output = response.pipe_output # Verify the concept metadata assert pipe_output.main_stuff.concept.code == "Invoice" diff --git a/tests/e2e/pipelex/concepts/nested_concepts/test_structure_generator_cli.py b/tests/e2e/pipelex/concepts/nested_concepts/test_structure_generator_cli.py index 3707549bb..80de61124 100644 --- a/tests/e2e/pipelex/concepts/nested_concepts/test_structure_generator_cli.py +++ b/tests/e2e/pipelex/concepts/nested_concepts/test_structure_generator_cli.py @@ -33,22 +33,22 @@ async def test_generate_and_import_nested_concept_structures(self): """Test that generated structure files for nested concepts are importable and usable. This test: - 1. Uses the existing nested_concepts.plx file with concept-to-concept references + 1. Uses the existing nested_concepts.mthds file with concept-to-concept references 2. Generates Python structure files via the CLI helper function 3. Dynamically imports the generated modules 4. Instantiates the generated classes 5. 
Verifies nested concept references work correctly """ - # Path to the PLX file with nested concepts - plx_file_path = Path("tests/e2e/pipelex/concepts/nested_concepts/nested_concepts.plx").resolve() - assert plx_file_path.exists(), f"PLX file not found: {plx_file_path}" + # Path to the MTHDS file with nested concepts + mthds_file_path = Path("tests/e2e/pipelex/concepts/nested_concepts/nested_concepts.mthds").resolve() + assert mthds_file_path.exists(), f"MTHDS file not found: {mthds_file_path}" # Create a temporary directory for generated structures with tempfile.TemporaryDirectory() as temp_dir: output_directory = Path(temp_dir) - # Validate the PLX file to get blueprints - validate_result = await validate_bundle(plx_file_path=plx_file_path) + # Validate the MTHDS file to get blueprints + validate_result = await validate_bundle(mthds_file_path=mthds_file_path) blueprints = validate_result.blueprints # Generate structure files diff --git a/tests/e2e/pipelex/graph/test_graph_with_full_data.py b/tests/e2e/pipelex/graph/test_graph_with_full_data.py index 1a5b848bf..cbaf18312 100644 --- a/tests/e2e/pipelex/graph/test_graph_with_full_data.py +++ b/tests/e2e/pipelex/graph/test_graph_with_full_data.py @@ -22,7 +22,7 @@ from pipelex.graph.reactflow.reactflow_html import generate_reactflow_html_async from pipelex.graph.reactflow.viewspec_transformer import graphspec_to_viewspec from pipelex.pipe_run.pipe_run_mode import PipeRunMode -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner from pipelex.tools.misc.chart_utils import FlowchartDirection from pipelex.tools.misc.file_utils import get_incremental_directory_path, save_text_to_path from tests.cases import DocumentTestCases @@ -114,16 +114,19 @@ async def test_graph_captures_full_data(self, pipe_run_mode: PipeRunMode): ) # Run pipeline with graph tracing and full data capture - pipe_output = await execute_pipeline( - pipe_code="cv_job_matcher", + runner = PipelexRunner( library_dirs=["tests/e2e/pipelex/pipes/pipe_operators/pipe_compose"], + pipe_run_mode=pipe_run_mode, + execution_config=exec_config, + ) + response = await runner.execute_pipeline( + pipe_code="cv_job_matcher", inputs={ "cv_pdf": DocumentContent(url=DocumentTestCases.PDF_FILE_PATH_CV), "job_offer_pdf": DocumentContent(url=DocumentTestCases.PDF_FILE_PATH_2), }, - pipe_run_mode=pipe_run_mode, - execution_config=exec_config, ) + pipe_output = response.pipe_output # Basic assertions assert pipe_output is not None diff --git a/tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/cv_batch.plx b/tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/cv_batch.mthds similarity index 65% rename from tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/cv_batch.plx rename to tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/cv_batch.mthds index ddd0d819a..a4b96b23b 100644 --- a/tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/cv_batch.plx +++ b/tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/cv_batch.mthds @@ -1,33 +1,33 @@ -domain = "cv_job_batch" -description = "Analyzing CV and job offer compatibility and generating interview questions" +domain = "cv_job_batch" +description = "Analyzing CV and job offer compatibility and generating interview questions" system_prompt = "None" -main_pipe = "batch_analyze_cvs_for_job_offer" +main_pipe = "batch_analyze_cvs_for_job_offer" [concept.CandidateProfile] description = "A structured summary of a job candidate's professional background extracted from their CV." 
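+# Each structure field below is an inline table:
+# name = { type = "...", description = "...", required = true }.
+# `achievements` omits the `required` flag; assumption (not confirmed by this
+# diff): an omitted flag makes the field optional, mirroring the explicit
+# `required = false` used for optional fields elsewhere in these fixtures.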
[concept.CandidateProfile.structure] -skills = { type = "text", description = "Technical and soft skills possessed by the candidate", required = true } -experience = { type = "text", description = "Work history and professional experience", required = true } -education = { type = "text", description = "Educational background and qualifications", required = true } +skills = { type = "text", description = "Technical and soft skills possessed by the candidate", required = true } +experience = { type = "text", description = "Work history and professional experience", required = true } +education = { type = "text", description = "Educational background and qualifications", required = true } achievements = { type = "text", description = "Notable accomplishments and certifications" } [concept.JobRequirements] description = "A structured summary of what a job position requires from candidates." [concept.JobRequirements.structure] -required_skills = { type = "text", description = "Skills that are mandatory for the position", required = true } +required_skills = { type = "text", description = "Skills that are mandatory for the position", required = true } responsibilities = { type = "text", description = "Main duties and tasks of the role", required = true } -qualifications = { type = "text", description = "Required education, certifications, or experience levels", required = true } -nice_to_haves = { type = "text", description = "Preferred but not mandatory qualifications" } +qualifications = { type = "text", description = "Required education, certifications, or experience levels", required = true } +nice_to_haves = { type = "text", description = "Preferred but not mandatory qualifications" } [concept.CandidateMatch] description = "An evaluation of how well a candidate fits a job position." 
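+# CandidateMatch is the per-CV result: the batched `process_cv` step in
+# `batch_analyze_cvs_for_job_offer` below emits one CandidateMatch per CV,
+# and the orchestrator collects them into its "CandidateMatch[]" output.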
[concept.CandidateMatch.structure] -match_score = { type = "number", description = "Numerical score representing overall fit percentage between 0 and 100", required = true } -strengths = { type = "text", description = "Areas where the candidate meets or exceeds requirements", required = true } -gaps = { type = "text", description = "Areas where the candidate falls short of requirements", required = true } +match_score = { type = "number", description = "Numerical score representing overall fit percentage between 0 and 100", required = true } +strengths = { type = "text", description = "Areas where the candidate meets or exceeds requirements", required = true } +gaps = { type = "text", description = "Areas where the candidate falls short of requirements", required = true } overall_assessment = { type = "text", description = "Summary evaluation of the candidate's suitability", required = true } [pipe.batch_analyze_cvs_for_job_offer] @@ -38,17 +38,17 @@ Main orchestrator pipe that takes a bunch of CVs and a job offer in PDF format, inputs = { cvs = "Document[]", job_offer_pdf = "Document" } output = "CandidateMatch[]" steps = [ - { pipe = "extract_one_job_offer", result = "job_offer_pages" }, - { pipe = "analyze_job_requirements", result = "job_requirements" }, - { pipe = "process_cv", batch_over = "cvs", batch_as = "cv_pdf", result = "match_analyses" }, + { pipe = "extract_one_job_offer", result = "job_offer_pages" }, + { pipe = "analyze_job_requirements", result = "job_requirements" }, + { pipe = "process_cv", batch_over = "cvs", batch_as = "cv_pdf", result = "match_analyses" }, ] [pipe.extract_one_job_offer] -type = "PipeExtract" +type = "PipeExtract" description = "Extracts text content from the job offer PDF document" -inputs = { job_offer_pdf = "Document" } -output = "Page[]" -model = "@default-text-from-pdf" +inputs = { job_offer_pdf = "Document" } +output = "Page[]" +model = "@default-text-from-pdf" [pipe.analyze_job_requirements] type = "PipeLLM" @@ -73,17 +73,17 @@ description = "Processes one application" inputs = { cv_pdf = "Document", job_requirements = "JobRequirements" } output = "CandidateMatch" steps = [ - { pipe = "extract_one_cv", result = "cv_pages" }, - { pipe = "analyze_one_cv", result = "candidate_profile" }, - { pipe = "analyze_match", result = "match_analysis" }, + { pipe = "extract_one_cv", result = "cv_pages" }, + { pipe = "analyze_one_cv", result = "candidate_profile" }, + { pipe = "analyze_match", result = "match_analysis" }, ] [pipe.extract_one_cv] -type = "PipeExtract" +type = "PipeExtract" description = "Extracts text content from the CV PDF document" -inputs = { cv_pdf = "Document" } -output = "Page[]" -model = "@default-text-from-pdf" +inputs = { cv_pdf = "Document" } +output = "Page[]" +model = "@default-text-from-pdf" [pipe.analyze_one_cv] type = "PipeLLM" diff --git a/tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/joke_batch.plx b/tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/joke_batch.mthds similarity index 61% rename from tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/joke_batch.plx rename to tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/joke_batch.mthds index 9df8b37b6..596de2e94 100644 --- a/tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/joke_batch.plx +++ b/tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/joke_batch.mthds @@ -1,22 +1,22 @@ -domain = "joke_generation" +domain = "joke_generation" description = "Generating one-liner jokes from topics" -main_pipe = "generate_jokes_from_topics" +main_pipe = 
"generate_jokes_from_topics" [concept.Topic] description = "A subject or theme that can be used as the basis for a joke." -refines = "Text" +refines = "Text" [concept.Joke] description = "A humorous one-liner intended to make people laugh." -refines = "Text" +refines = "Text" [pipe.generate_jokes_from_topics] type = "PipeSequence" description = "Main orchestrator pipe that generates 3 joke topics and creates a one-liner joke for each topic" output = "Joke[]" steps = [ - { pipe = "generate_topics", result = "topics" }, - { pipe = "batch_generate_jokes", result = "jokes" }, + { pipe = "generate_topics", result = "topics" }, + { pipe = "batch_generate_jokes", result = "jokes" }, ] [pipe.generate_topics] @@ -32,18 +32,18 @@ Generate 3 distinct and varied topics that would be suitable for crafting clever """ [pipe.batch_generate_jokes] -type = "PipeBatch" -description = "Generate a one-liner joke for each topic by iterating over the topics list" -inputs = { topics = "Topic[]" } -output = "Joke[]" +type = "PipeBatch" +description = "Generate a one-liner joke for each topic by iterating over the topics list" +inputs = { topics = "Topic[]" } +output = "Joke[]" branch_pipe_code = "generate_joke" -input_list_name = "topics" -input_item_name = "topic" +input_list_name = "topics" +input_item_name = "topic" [pipe.generate_joke] -type = "PipeLLM" +type = "PipeLLM" description = "Write a clever one-liner joke based on the given topic" -inputs = { topic = "Topic" } -output = "Joke" -model = "$testing-text" -prompt = "Write a clever one-liner joke about $topic. Be concise and witty." +inputs = { topic = "Topic" } +output = "Joke" +model = "$testing-text" +prompt = "Write a clever one-liner joke about $topic. Be concise and witty." diff --git a/tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/test_pipe_batch_graph.py b/tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/test_pipe_batch_graph.py index eb44cc17c..a3de4818d 100644 --- a/tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/test_pipe_batch_graph.py +++ b/tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/test_pipe_batch_graph.py @@ -11,7 +11,7 @@ from pipelex.graph.graph_factory import generate_graph_outputs from pipelex.graph.graphspec import EdgeKind, GraphSpec, NodeSpec from pipelex.pipe_run.pipe_run_mode import PipeRunMode -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner from pipelex.tools.misc.file_utils import get_incremental_directory_path, save_text_to_path from tests.cases import DocumentTestCases from tests.conftest import TEST_OUTPUTS_DIR @@ -48,9 +48,13 @@ async def test_pipe_batch_generates_batch_edges(self, pipe_run_mode: PipeRunMode ) # Run PipeBatch pipeline with graph tracing - pipe_output = await execute_pipeline( - pipe_code="batch_analyze_cvs_for_job_offer", + runner = PipelexRunner( library_dirs=["tests/e2e/pipelex/pipes/pipe_controller/pipe_batch"], + pipe_run_mode=pipe_run_mode, + execution_config=exec_config, + ) + response = await runner.execute_pipeline( + pipe_code="batch_analyze_cvs_for_job_offer", inputs={ "cvs": [ DocumentContent(url=DocumentTestCases.PDF_FILE_PATH_CV), @@ -58,9 +62,8 @@ async def test_pipe_batch_generates_batch_edges(self, pipe_run_mode: PipeRunMode ], "job_offer_pdf": DocumentContent(url=DocumentTestCases.PDF_FILE_PATH_2), }, - pipe_run_mode=pipe_run_mode, - execution_config=exec_config, ) + pipe_output = response.pipe_output # Basic assertions assert pipe_output is not None @@ -152,7 +155,7 @@ async def 
test_pipe_batch_generates_batch_edges(self, pipe_run_mode: PipeRunMode ) async def test_joke_batch_graph_outputs(self, pipe_run_mode: PipeRunMode): - """Simple test that runs joke_batch.plx and generates all graph outputs. + """Simple test that runs joke_batch.mthds and generates all graph outputs. This test runs the joke batch pipeline with graph tracing and generates: - graph.json (GraphSpec) @@ -182,12 +185,15 @@ async def test_joke_batch_graph_outputs(self, pipe_run_mode: PipeRunMode): exec_config = exec_config.model_copy(update={"graph_config": graph_config}) # Run joke batch pipeline - pipe_output = await execute_pipeline( - pipe_code="generate_jokes_from_topics", + runner = PipelexRunner( library_dirs=["tests/e2e/pipelex/pipes/pipe_controller/pipe_batch"], pipe_run_mode=pipe_run_mode, execution_config=exec_config, ) + response = await runner.execute_pipeline( + pipe_code="generate_jokes_from_topics", + ) + pipe_output = response.pipe_output # Basic assertions assert pipe_output is not None diff --git a/tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/parallel_graph_3branch.mthds b/tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/parallel_graph_3branch.mthds new file mode 100644 index 000000000..9037ee6b1 --- /dev/null +++ b/tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/parallel_graph_3branch.mthds @@ -0,0 +1,82 @@ +domain = "test_parallel_graph_3branch" +description = "Test 3-branch PipeParallel with selective downstream consumption" +main_pipe = "pg3_sequence" + +[concept.Pg3ToneResult] +description = "Result of tone analysis" +refines = "Text" + +[concept.Pg3LengthResult] +description = "Result of length analysis" +refines = "Text" + +[concept.Pg3StyleResult] +description = "Result of style analysis" +refines = "Text" + +[concept.Pg3CombinedResult] +description = "Combined results from 3-branch parallel analysis" + +[pipe.pg3_sequence] +type = "PipeSequence" +description = "Run 3-branch parallel analysis then selectively consume 2 of 3 branch outputs" +inputs = { input_text = "Text" } +output = "Text" +steps = [ + { pipe = "pg3_parallel", result = "full_combo" }, + { pipe = "pg3_refine_tone", result = "refined_tone" }, + { pipe = "pg3_refine_length", result = "refined_length" }, +] + +[pipe.pg3_parallel] +type = "PipeParallel" +description = "Analyze tone, length, and style in parallel with combined output" +inputs = { input_text = "Text" } +output = "Pg3CombinedResult" +add_each_output = true +combined_output = "Pg3CombinedResult" +branches = [ + { pipe = "pg3_analyze_tone", result = "tone_result" }, + { pipe = "pg3_analyze_length", result = "length_result" }, + { pipe = "pg3_analyze_style", result = "style_result" }, +] + +[pipe.pg3_analyze_tone] +type = "PipeLLM" +description = "Analyze the tone of the text" +inputs = { input_text = "Text" } +output = "Pg3ToneResult" +model = "$testing-text" +prompt = "Describe the tone of: @input_text.text" + +[pipe.pg3_analyze_length] +type = "PipeLLM" +description = "Analyze the length of the text" +inputs = { input_text = "Text" } +output = "Pg3LengthResult" +model = "$testing-text" +prompt = "Describe the length characteristics of: @input_text.text" + +[pipe.pg3_analyze_style] +type = "PipeLLM" +description = "Analyze the writing style of the text" +inputs = { input_text = "Text" } +output = "Pg3StyleResult" +model = "$testing-text" +prompt = "Describe the writing style of: @input_text.text" + +[pipe.pg3_refine_tone] +type = "PipeLLM" +description = "Refine the tone analysis" +inputs = { tone_result = "Pg3ToneResult" } 
+output = "Text" +model = "$testing-text" +prompt = "Refine and elaborate on this tone analysis: @tone_result.text" + +[pipe.pg3_refine_length] +type = "PipeLLM" +description = "Refine the length analysis" +inputs = { length_result = "Pg3LengthResult" } +output = "Text" +model = "$testing-text" +prompt = "Refine and elaborate on this length analysis: @length_result.text" diff --git a/tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/parallel_graph_add_each.mthds b/tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/parallel_graph_add_each.mthds new file mode 100644 index 000000000..6c0edbe85 --- /dev/null +++ b/tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/parallel_graph_add_each.mthds @@ -0,0 +1,59 @@ +domain = "test_parallel_graph_add_each" +description = "Test PipeParallel with add_each_output for graph edge verification" +main_pipe = "parallel_then_consume" + +[concept.ShortSummary] +description = "A brief one-sentence summary" +refines = "Text" + +[concept.DetailedSummary] +description = "A detailed multi-sentence summary" +refines = "Text" + +[pipe.parallel_then_consume] +type = "PipeSequence" +description = "Run parallel summaries then consume one downstream" +inputs = { input_text = "Text" } +output = "Text" +steps = [ + { pipe = "parallel_summarize", result = "..." }, + { pipe = "combine_summaries" }, +] + +[pipe.parallel_summarize] +type = "PipeParallel" +description = "Generate short and detailed summaries in parallel" +inputs = { input_text = "Text" } +output = "Text" +add_each_output = true +branches = [ + { pipe = "summarize_short", result = "short_summary" }, + { pipe = "summarize_detailed", result = "detailed_summary" }, +] + +[pipe.summarize_short] +type = "PipeLLM" +description = "Generate a short one-sentence summary" +inputs = { input_text = "Text" } +output = "ShortSummary" +model = "$testing-text" +prompt = "Summarize in one sentence: @input_text.text" + +[pipe.summarize_detailed] +type = "PipeLLM" +description = "Generate a detailed summary" +inputs = { input_text = "Text" } +output = "DetailedSummary" +model = "$testing-text" +prompt = "Write a detailed summary of: @input_text.text" + +[pipe.combine_summaries] +type = "PipeLLM" +description = "Combine short and detailed summaries into a final result" +inputs = { short_summary = "ShortSummary", detailed_summary = "DetailedSummary" } +output = "Text" +model = "$testing-text" +prompt = """Combine these two summaries into a final result: + +Short: @short_summary.text +Detailed: @detailed_summary.text""" diff --git a/tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/parallel_graph_combined.mthds b/tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/parallel_graph_combined.mthds new file mode 100644 index 000000000..9c976d3d1 --- /dev/null +++ b/tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/parallel_graph_combined.mthds @@ -0,0 +1,60 @@ +domain = "test_parallel_graph_combined" +description = "Test PipeParallel with combined_output wrapped in PipeSequence with follow-up consumer" +main_pipe = "pgc_analysis_then_summarize" + +[concept.PgcToneResult] +description = "Result of tone analysis" +refines = "Text" + +[concept.PgcLengthResult] +description = "Result of length analysis" +refines = "Text" + +[concept.PgcCombinedResult] +description = "Combined results from parallel analysis" + +[pipe.pgc_analysis_then_summarize] +type = "PipeSequence" +description = "Run parallel analysis then summarize the combined result" +inputs = { input_text = "Text" } +output = "Text" +steps = [ + { pipe = 
"pgc_parallel_analysis", result = "pgc_combined_result" }, + { pipe = "pgc_summarize_combined" }, +] + +[pipe.pgc_parallel_analysis] +type = "PipeParallel" +description = "Analyze tone and length in parallel with combined output" +inputs = { input_text = "Text" } +output = "PgcCombinedResult" +add_each_output = true +combined_output = "PgcCombinedResult" +branches = [ + { pipe = "pgc_analyze_tone", result = "tone_result" }, + { pipe = "pgc_analyze_length", result = "length_result" }, +] + +[pipe.pgc_analyze_tone] +type = "PipeLLM" +description = "Analyze the tone of the text" +inputs = { input_text = "Text" } +output = "PgcToneResult" +model = "$testing-text" +prompt = "Describe the tone of: @input_text.text" + +[pipe.pgc_analyze_length] +type = "PipeLLM" +description = "Analyze the length of the text" +inputs = { input_text = "Text" } +output = "PgcLengthResult" +model = "$testing-text" +prompt = "Describe the length characteristics of: @input_text.text" + +[pipe.pgc_summarize_combined] +type = "PipeLLM" +description = "Summarize the combined parallel analysis result" +inputs = { pgc_combined_result = "PgcCombinedResult" } +output = "Text" +model = "$testing-text" +prompt = "Summarize the following analysis: @pgc_combined_result" diff --git a/tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/parallel_graph_models.py b/tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/parallel_graph_models.py new file mode 100644 index 000000000..b073ebaa4 --- /dev/null +++ b/tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/parallel_graph_models.py @@ -0,0 +1,19 @@ +from pydantic import Field + +from pipelex.core.stuffs.structured_content import StructuredContent +from pipelex.core.stuffs.text_content import TextContent + + +class PgcCombinedResult(StructuredContent): + """Combined results from parallel analysis branches.""" + + tone_result: TextContent = Field(..., description="Result of tone analysis") + length_result: TextContent = Field(..., description="Result of length analysis") + + +class Pg3CombinedResult(StructuredContent): + """Combined results from 3-branch parallel analysis.""" + + tone_result: TextContent = Field(..., description="Result of tone analysis") + length_result: TextContent = Field(..., description="Result of length analysis") + style_result: TextContent = Field(..., description="Result of style analysis") diff --git a/tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/test_data.py b/tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/test_data.py new file mode 100644 index 000000000..cfd77ed2b --- /dev/null +++ b/tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/test_data.py @@ -0,0 +1,106 @@ +"""Test data for PipeParallel graph tests.""" + +from typing import ClassVar + + +class ParallelCombinedGraphExpectationsBase: + """Base class for PipeParallel graph expectations with combined_output.""" + + PARALLEL_PIPE_CODE: ClassVar[str] + EXPECTED_PIPE_CODES: ClassVar[set[str]] + EXPECTED_NODE_COUNTS: ClassVar[dict[str, int]] + EXPECTED_EDGE_COUNTS: ClassVar[dict[str, int]] + + +class ParallelAddEachGraphExpectations: + """Expected structure for the parallel_graph_add_each graph.""" + + # Expected node pipe_codes + EXPECTED_PIPE_CODES: ClassVar[set[str]] = { + "parallel_then_consume", # PipeSequence (outer controller) + "parallel_summarize", # PipeParallel (parallel controller) + "summarize_short", # PipeLLM (branch 1) + "summarize_detailed", # PipeLLM (branch 2) + "combine_summaries", # PipeLLM (downstream consumer) + } + + # Expected number of nodes per pipe_code 
+ EXPECTED_NODE_COUNTS: ClassVar[dict[str, int]] = { + "parallel_then_consume": 1, + "parallel_summarize": 1, + "summarize_short": 1, + "summarize_detailed": 1, + "combine_summaries": 1, + } + + # Expected number of edges by kind + EXPECTED_EDGE_COUNTS: ClassVar[dict[str, int]] = { + "contains": 4, # sequence->parallel, sequence->combine, parallel->short, parallel->detailed + "data": 2, # parallel->combine (short_summary), parallel->combine (detailed_summary) + } + + +class ParallelCombinedGraphExpectations(ParallelCombinedGraphExpectationsBase): + """Expected structure for the parallel_graph_combined graph (PipeSequence wrapping PipeParallel with combined_output).""" + + PARALLEL_PIPE_CODE: ClassVar[str] = "pgc_parallel_analysis" + + # Expected node pipe_codes + EXPECTED_PIPE_CODES: ClassVar[set[str]] = { + "pgc_analysis_then_summarize", # PipeSequence (outer controller) + "pgc_parallel_analysis", # PipeParallel (parallel controller with combined_output) + "pgc_analyze_tone", # PipeLLM (branch 1) + "pgc_analyze_length", # PipeLLM (branch 2) + "pgc_summarize_combined", # PipeLLM (downstream consumer of combined result) + } + + # Expected number of nodes per pipe_code + EXPECTED_NODE_COUNTS: ClassVar[dict[str, int]] = { + "pgc_analysis_then_summarize": 1, + "pgc_parallel_analysis": 1, + "pgc_analyze_tone": 1, + "pgc_analyze_length": 1, + "pgc_summarize_combined": 1, + } + + # Expected number of edges by kind + EXPECTED_EDGE_COUNTS: ClassVar[dict[str, int]] = { + "contains": 4, # sequence->parallel, sequence->summarize_combined, parallel->tone, parallel->length + "parallel_combine": 2, # tone_result->combined, length_result->combined + "data": 1, # parallel->summarize_combined (combined result) + } + + +class Parallel3BranchGraphExpectations(ParallelCombinedGraphExpectationsBase): + """Expected structure for the parallel_graph_3branch graph (3-branch PipeParallel with selective consumption).""" + + PARALLEL_PIPE_CODE: ClassVar[str] = "pg3_parallel" + + # Expected node pipe_codes + EXPECTED_PIPE_CODES: ClassVar[set[str]] = { + "pg3_sequence", # PipeSequence (outer controller) + "pg3_parallel", # PipeParallel (3-branch parallel with combined_output) + "pg3_analyze_tone", # PipeLLM (branch 1) + "pg3_analyze_length", # PipeLLM (branch 2) + "pg3_analyze_style", # PipeLLM (branch 3 - unused downstream) + "pg3_refine_tone", # PipeLLM (consumes tone_result) + "pg3_refine_length", # PipeLLM (consumes length_result) + } + + # Expected number of nodes per pipe_code + EXPECTED_NODE_COUNTS: ClassVar[dict[str, int]] = { + "pg3_sequence": 1, + "pg3_parallel": 1, + "pg3_analyze_tone": 1, + "pg3_analyze_length": 1, + "pg3_analyze_style": 1, + "pg3_refine_tone": 1, + "pg3_refine_length": 1, + } + + # Expected number of edges by kind + EXPECTED_EDGE_COUNTS: ClassVar[dict[str, int]] = { + "contains": 6, # sequence->parallel, sequence->refine_tone, sequence->refine_length, parallel->tone, parallel->length, parallel->style + "parallel_combine": 3, # tone->combined, length->combined, style->combined + "data": 2, # parallel->refine_tone (tone_result), parallel->refine_length (length_result) + } diff --git a/tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/test_pipe_parallel_graph.py b/tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/test_pipe_parallel_graph.py new file mode 100644 index 000000000..bcb485fe8 --- /dev/null +++ b/tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/test_pipe_parallel_graph.py @@ -0,0 +1,294 @@ +"""E2E test for PipeParallel with graph tracing to verify DATA edges from 
controller to consumers.""" + +from collections import Counter +from pathlib import Path + +import pytest + +from pipelex import log, pretty_print +from pipelex.config import get_config +from pipelex.core.stuffs.text_content import TextContent +from pipelex.graph.graph_factory import generate_graph_outputs +from pipelex.graph.graphspec import GraphSpec, NodeSpec +from pipelex.pipe_run.pipe_run_mode import PipeRunMode +from pipelex.pipeline.runner import PipelexRunner +from pipelex.tools.misc.file_utils import get_incremental_directory_path, save_text_to_path +from tests.conftest import TEST_OUTPUTS_DIR +from tests.e2e.pipelex.pipes.pipe_controller.pipe_parallel.test_data import ( + Parallel3BranchGraphExpectations, + ParallelAddEachGraphExpectations, + ParallelCombinedGraphExpectations, + ParallelCombinedGraphExpectationsBase, +) + + +def _get_next_output_folder(subfolder: str) -> Path: + """Get the next numbered output folder for parallel graph outputs.""" + base_dir = str(Path(TEST_OUTPUTS_DIR) / f"pipe_parallel_graph_{subfolder}") + return Path(get_incremental_directory_path(base_dir, "run")) + + +@pytest.mark.dry_runnable +@pytest.mark.llm +@pytest.mark.inference +@pytest.mark.asyncio(loop_scope="class") +class TestPipeParallelGraph: + """E2E tests for PipeParallel graph generation with correct DATA edges.""" + + async def test_parallel_add_each_output_graph(self, pipe_run_mode: PipeRunMode): + """Verify PipeParallel with add_each_output generates correct DATA edges. + + This test runs a PipeSequence containing: + 1. PipeParallel (add_each_output=true) that produces short_summary and detailed_summary + 2. A downstream PipeLLM (combine_summaries) that consumes both branch outputs + + Expected: DATA edges flow from PipeParallel to combine_summaries (not from sub-pipes). + """ + # Build config with graph tracing and all graph outputs enabled + base_config = get_config().pipelex.pipeline_execution_config + exec_config = base_config.with_graph_config_overrides( + generate_graph=True, + force_include_full_data=False, + ) + graph_config = exec_config.graph_config.model_copy( + update={ + "graphs_inclusion": exec_config.graph_config.graphs_inclusion.model_copy( + update={ + "graphspec_json": True, + "mermaidflow_html": True, + "reactflow_html": True, + } + ) + } + ) + exec_config = exec_config.model_copy(update={"graph_config": graph_config}) + + # Run pipeline with input text + runner = PipelexRunner( + library_dirs=["tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel"], + pipe_run_mode=pipe_run_mode, + execution_config=exec_config, + ) + response = await runner.execute_pipeline( + pipe_code="parallel_then_consume", + inputs={ + "input_text": TextContent(text="The quick brown fox jumps over the lazy dog. This is a sample text for testing parallel processing."), + }, + ) + pipe_output = response.pipe_output + + # Basic assertions + assert response.pipe_output is not None + assert response.pipe_output.working_memory is not None + assert response.pipe_output.main_stuff is not None + + assert pipe_output.graph_spec is not None + # Build node lookup + nodes_by_id: dict[str, NodeSpec] = {node.node_id: node for node in pipe_output.graph_spec.nodes} + nodes_by_pipe_code: dict[str, list[NodeSpec]] = {} + for node in pipe_output.graph_spec.nodes: + if node.pipe_code: + nodes_by_pipe_code.setdefault(node.pipe_code, []).append(node) + + # 1. 
Verify all expected pipe_codes exist + actual_pipe_codes = set(nodes_by_pipe_code.keys()) + assert actual_pipe_codes == ParallelAddEachGraphExpectations.EXPECTED_PIPE_CODES, ( + f"Unexpected pipe codes. Expected: {ParallelAddEachGraphExpectations.EXPECTED_PIPE_CODES}, Got: {actual_pipe_codes}" + ) + + # 2. Verify node counts per pipe_code + for pipe_code, expected_count in ParallelAddEachGraphExpectations.EXPECTED_NODE_COUNTS.items(): + actual_count = len(nodes_by_pipe_code.get(pipe_code, [])) + assert actual_count == expected_count, f"Expected {expected_count} nodes for pipe_code '{pipe_code}', got {actual_count}" + + # 3. Verify edge counts by kind + actual_edge_counts = Counter(str(edge.kind) for edge in pipe_output.graph_spec.edges) + for kind, expected_count in ParallelAddEachGraphExpectations.EXPECTED_EDGE_COUNTS.items(): + actual_count = actual_edge_counts.get(kind, 0) + assert actual_count == expected_count, f"Expected {expected_count} edges of kind '{kind}', got {actual_count}" + + # 4. Verify DATA edges source from PipeParallel, not from sub-pipes + parallel_node = nodes_by_pipe_code["parallel_summarize"][0] + combine_node = nodes_by_pipe_code["combine_summaries"][0] + data_edges = [edge for edge in pipe_output.graph_spec.edges if edge.kind.is_data] + + for edge in data_edges: + # DATA edges targeting combine_summaries should come from PipeParallel + if edge.target == combine_node.node_id: + assert edge.source == parallel_node.node_id, ( + f"DATA edge to combine_summaries should come from PipeParallel '{parallel_node.node_id}', " + f"but comes from '{edge.source}' (pipe_code: '{nodes_by_id[edge.source].pipe_code}')" + ) + + # 5. Verify PipeParallel node has output specs for both branch outputs + assert len(parallel_node.node_io.outputs) >= 2, ( + f"PipeParallel should have at least 2 output specs (branch outputs), got {len(parallel_node.node_io.outputs)}" + ) + output_names = {output.name for output in parallel_node.node_io.outputs} + assert "short_summary" in output_names, "PipeParallel should have 'short_summary' output" + assert "detailed_summary" in output_names, "PipeParallel should have 'detailed_summary' output" + + # 6. Verify containment: sub-pipes are inside PipeParallel + contains_edges = [edge for edge in pipe_output.graph_spec.edges if edge.kind.is_contains] + parallel_children = {edge.target for edge in contains_edges if edge.source == parallel_node.node_id} + branch_pipe_codes = {"summarize_short", "summarize_detailed"} + branch_node_ids = {node.node_id for pipe_code in branch_pipe_codes for node in nodes_by_pipe_code.get(pipe_code, [])} + assert branch_node_ids.issubset(parallel_children), ( + f"Branch nodes should be children of PipeParallel. 
Branch IDs: {branch_node_ids}, Parallel children: {parallel_children}" + ) + + # Generate and save graph outputs + graph_outputs = await generate_graph_outputs( + graph_spec=pipe_output.graph_spec, + graph_config=graph_config, + pipe_code="parallel_then_consume", + ) + + output_dir = _get_next_output_folder("add_each") + if graph_outputs.graphspec_json: + save_text_to_path(graph_outputs.graphspec_json, str(output_dir / "graph.json")) + if graph_outputs.mermaidflow_html: + save_text_to_path(graph_outputs.mermaidflow_html, str(output_dir / "mermaidflow.html")) + if graph_outputs.reactflow_html: + save_text_to_path(graph_outputs.reactflow_html, str(output_dir / "reactflow.html")) + + pretty_print( + { + "graph_id": pipe_output.graph_spec.graph_id, + "nodes": len(pipe_output.graph_spec.nodes), + "edges": len(pipe_output.graph_spec.edges), + "edges_by_kind": dict(actual_edge_counts), + "output_dir": str(output_dir), + }, + title="Parallel Add Each Graph Outputs", + ) + + log.info("Structural validation passed: DATA edges correctly source from PipeParallel") + + @pytest.mark.parametrize( + ("pipe_code", "expectations_class"), + [ + ("pgc_analysis_then_summarize", ParallelCombinedGraphExpectations), + ("pg3_sequence", Parallel3BranchGraphExpectations), + ], + ) + async def test_parallel_combined_output_graph( + self, + pipe_run_mode: PipeRunMode, + pipe_code: str, + expectations_class: type[ParallelCombinedGraphExpectationsBase], + ): + """Verify PipeParallel with combined_output generates correct graph structure. + + Parametrized with: + - pgc_analysis_then_summarize: 2-branch PipeParallel wrapped in PipeSequence with follow-up consumer + - pg3_sequence: 3-branch PipeParallel with selective downstream consumption (1 branch unused) + """ + # Build config with graph tracing + base_config = get_config().pipelex.pipeline_execution_config + exec_config = base_config.with_graph_config_overrides( + generate_graph=True, + force_include_full_data=False, + ) + graph_config = exec_config.graph_config.model_copy( + update={ + "graphs_inclusion": exec_config.graph_config.graphs_inclusion.model_copy( + update={ + "graphspec_json": True, + "reactflow_html": True, + } + ) + } + ) + exec_config = exec_config.model_copy(update={"graph_config": graph_config}) + + # Run pipeline + runner = PipelexRunner( + library_dirs=["tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel"], + pipe_run_mode=pipe_run_mode, + execution_config=exec_config, + ) + response = await runner.execute_pipeline( + pipe_code=pipe_code, + inputs={"input_text": TextContent(text="Hello world, this is a test document for parallel analysis.")}, + ) + pipe_output = response.pipe_output + + assert response.pipe_output is not None + assert response.pipe_output.main_stuff is not None + + # Verify graph + graph_spec = pipe_output.graph_spec + assert graph_spec is not None + assert isinstance(graph_spec, GraphSpec) + + log.info(f"Parallel combined graph ({pipe_code}): {len(graph_spec.nodes)} nodes, {len(graph_spec.edges)} edges") + + # Build node lookup + nodes_by_pipe_code: dict[str, list[NodeSpec]] = {} + for node in graph_spec.nodes: + if node.pipe_code: + nodes_by_pipe_code.setdefault(node.pipe_code, []).append(node) + + # 1. Verify all expected pipe_codes exist + actual_pipe_codes = set(nodes_by_pipe_code.keys()) + assert actual_pipe_codes == expectations_class.EXPECTED_PIPE_CODES, ( + f"Unexpected pipe codes. Expected: {expectations_class.EXPECTED_PIPE_CODES}, Got: {actual_pipe_codes}" + ) + + # 2. 
Verify node counts per pipe_code + for node_pipe_code, expected_count in expectations_class.EXPECTED_NODE_COUNTS.items(): + actual_count = len(nodes_by_pipe_code.get(node_pipe_code, [])) + assert actual_count == expected_count, f"Expected {expected_count} nodes for pipe_code '{node_pipe_code}', got {actual_count}" + + # 3. Verify edge counts by kind + actual_edge_counts = Counter(str(edge.kind) for edge in graph_spec.edges) + for kind, expected_count in expectations_class.EXPECTED_EDGE_COUNTS.items(): + actual_count = actual_edge_counts.get(kind, 0) + assert actual_count == expected_count, f"Expected {expected_count} edges of kind '{kind}', got {actual_count}" + + # 4. Verify PARALLEL_COMBINE edges connect branch producers to the PipeParallel node + parallel_pipe_code = expectations_class.PARALLEL_PIPE_CODE + parallel_node = nodes_by_pipe_code[parallel_pipe_code][0] + parallel_combine_edges = [edge for edge in graph_spec.edges if edge.kind.is_parallel_combine] + expected_combine_count = expectations_class.EXPECTED_EDGE_COUNTS.get("parallel_combine", 0) + assert len(parallel_combine_edges) == expected_combine_count, ( + f"Expected {expected_combine_count} PARALLEL_COMBINE edges, got {len(parallel_combine_edges)}" + ) + for edge in parallel_combine_edges: + assert edge.target == parallel_node.node_id, ( + f"PARALLEL_COMBINE edge target should be PipeParallel '{parallel_node.node_id}', got '{edge.target}'" + ) + assert edge.source_stuff_digest is not None, "PARALLEL_COMBINE edge should have source_stuff_digest" + assert edge.target_stuff_digest is not None, "PARALLEL_COMBINE edge should have target_stuff_digest" + + # Generate and save graph outputs + graph_outputs = await generate_graph_outputs( + graph_spec=graph_spec, + graph_config=graph_config, + pipe_code=pipe_code, + ) + + output_dir = _get_next_output_folder(pipe_code) + if graph_outputs.graphspec_json: + save_text_to_path(graph_outputs.graphspec_json, str(output_dir / "graph.json")) + if graph_outputs.mermaidflow_html: + save_text_to_path(graph_outputs.mermaidflow_html, str(output_dir / "mermaidflow.html")) + if graph_outputs.mermaidflow_mmd: + save_text_to_path(graph_outputs.mermaidflow_mmd, str(output_dir / "mermaidflow.mmd")) + if graph_outputs.reactflow_html: + save_text_to_path(graph_outputs.reactflow_html, str(output_dir / "reactflow.html")) + + pretty_print( + { + "graph_id": graph_spec.graph_id, + "nodes": len(graph_spec.nodes), + "edges": len(graph_spec.edges), + "edges_by_kind": dict(actual_edge_counts), + "parallel_outputs": [output.name for output in parallel_node.node_io.outputs], + "output_dir": str(output_dir), + }, + title=f"Parallel Combined Graph Outputs ({pipe_code})", + ) + + log.info(f"Structural validation passed: {pipe_code} combined_output graph is correct") diff --git a/tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/discord_newsletter.plx b/tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/discord_newsletter.mthds similarity index 81% rename from tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/discord_newsletter.plx rename to tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/discord_newsletter.mthds index 9d6f429d0..586e2e6b3 100644 --- a/tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/discord_newsletter.plx +++ b/tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/discord_newsletter.mthds @@ -1,10 +1,10 @@ -domain = "discord_newsletter_e2e" +domain = "discord_newsletter_e2e" description = "Create newsletters from Discord channel content by summarizing messages and organizing them 
according to newsletter format" [concept] DiscordChannelUpdateE2E = "A Discord channel with its messages for newsletter generation" -ChannelSummaryE2E = "A summarized Discord channel for newsletter inclusion" -HtmlNewsletterE2E = "The final newsletter content in html format with organized channel summaries" +ChannelSummaryE2E = "A summarized Discord channel for newsletter inclusion" +HtmlNewsletterE2E = "The final newsletter content in html format with organized channel summaries" [pipe.write_discord_newsletter_e2e] type = "PipeSequence" @@ -12,19 +12,19 @@ description = "Create a newsletter from Discord articles by summarizing channels inputs = { discord_channel_updates = "DiscordChannelUpdateE2E[]" } output = "HtmlNewsletterE2E" steps = [ - { pipe = "summarize_discord_channel_update_e2e", batch_over = "discord_channel_updates", batch_as = "discord_channel_update", result = "channel_summaries" }, - { pipe = "write_weekly_summary_e2e", result = "weekly_summary" }, - { pipe = "format_html_newsletter_e2e", result = "html_newsletter" }, + { pipe = "summarize_discord_channel_update_e2e", batch_over = "discord_channel_updates", batch_as = "discord_channel_update", result = "channel_summaries" }, + { pipe = "write_weekly_summary_e2e", result = "weekly_summary" }, + { pipe = "format_html_newsletter_e2e", result = "html_newsletter" }, ] [pipe.summarize_discord_channel_update_e2e] -type = "PipeCondition" -description = "Select the appropriate summary pipe based on the channel name" -inputs = { discord_channel_update = "DiscordChannelUpdateE2E" } -output = "ChannelSummaryE2E" -expression = "discord_channel_update.name" -outcomes = { "Introduce-Yourself" = "summarize_discord_channel_update_for_new_members_e2e" } +type = "PipeCondition" +description = "Select the appropriate summary pipe based on the channel name" +inputs = { discord_channel_update = "DiscordChannelUpdateE2E" } +output = "ChannelSummaryE2E" +expression = "discord_channel_update.name" +outcomes = { "Introduce-Yourself" = "summarize_discord_channel_update_for_new_members_e2e" } default_outcome = "summarize_discord_channel_update_general_e2e" [pipe.summarize_discord_channel_update_for_new_members_e2e] @@ -77,10 +77,10 @@ Keep it short: 200 characters. 
""" [pipe.format_html_newsletter_e2e] -type = "PipeCompose" +type = "PipeCompose" description = "Combine weekly and channel summaries into a complete newsletter following specific formatting requirements" -inputs = { weekly_summary = "Text", channel_summaries = "ChannelSummaryE2E[]" } -output = "HtmlNewsletterE2E" +inputs = { weekly_summary = "Text", channel_summaries = "ChannelSummaryE2E[]" } +output = "HtmlNewsletterE2E" [pipe.format_html_newsletter_e2e.template] category = "html" @@ -127,4 +127,3 @@ $weekly_summary {% endfor %} {% endif %} """ - diff --git a/tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/test_pipe_sequence_batching.py b/tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/test_pipe_sequence_batching.py index a31a08abe..a295a0a5e 100644 --- a/tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/test_pipe_sequence_batching.py +++ b/tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/test_pipe_sequence_batching.py @@ -13,8 +13,8 @@ from pipelex.hub import get_required_pipe from pipelex.pipe_run.pipe_run_params import PipeRunMode from pipelex.pipe_run.pipe_run_params_factory import PipeRunParamsFactory -from pipelex.pipeline.execute import execute_pipeline from pipelex.pipeline.job_metadata import JobMetadata +from pipelex.pipeline.runner import PipelexRunner from tests.integration.pipelex.pipes.controller.pipe_sequence.pipe_sequence import Document, ProductRating @@ -67,10 +67,12 @@ async def test_review_analysis_sequence_with_batching( ) working_memory = WorkingMemoryFactory.make_from_single_stuff(document_stuff) # Execute the pipeline - pipe_output = await execute_pipeline( + runner = PipelexRunner() + response = await runner.execute_pipeline( pipe_code="analyze_reviews_sequence", inputs=working_memory, ) + pipe_output = response.pipe_output # Basic output validation assert pipe_output is not None diff --git a/tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/test_pipe_sequence_multiplicity.py b/tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/test_pipe_sequence_multiplicity.py index 40be993ea..ab3b2d2e5 100644 --- a/tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/test_pipe_sequence_multiplicity.py +++ b/tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/test_pipe_sequence_multiplicity.py @@ -7,7 +7,7 @@ from pipelex.core.stuffs.stuff_factory import StuffFactory from pipelex.core.stuffs.text_content import TextContent from pipelex.pipe_run.pipe_run_params import PipeRunMode -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner @pytest.mark.dry_runnable @@ -31,12 +31,15 @@ async def test_creative_ideation_sequence_with_multiplicity(self, pipe_run_mode: working_memory = WorkingMemoryFactory.make_from_multiple_stuffs([topic_stuff]) # Execute the pipeline - pipe_output = await execute_pipeline( + runner = PipelexRunner( library_dirs=["tests/integration/pipelex/pipes/controller/pipe_sequence/"], + pipe_run_mode=pipe_run_mode, + ) + response = await runner.execute_pipeline( pipe_code="creative_ideation_sequence", inputs=working_memory, - pipe_run_mode=pipe_run_mode, ) + pipe_output = response.pipe_output # Basic assertions assert pipe_output is not None diff --git a/tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/test_pipe_sequence_with_input_memory.py b/tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/test_pipe_sequence_with_input_memory.py index 7e3b2b077..f943d07b9 100644 --- a/tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/test_pipe_sequence_with_input_memory.py +++ 
b/tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/test_pipe_sequence_with_input_memory.py @@ -4,7 +4,7 @@ from pipelex import pretty_print from pipelex.pipe_run.pipe_run_mode import PipeRunMode -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner from tests.e2e.pipelex.pipes.pipe_controller.pipe_sequence.test_tweet import OptimizedTweet SAMPLE_DRAFT_TWEET = """ @@ -94,8 +94,11 @@ class TestPipeSequenceWithInputMemory: async def test_optimize_tweet_sequence_with_input_memory(self, pipe_run_mode: PipeRunMode): """Test the optimize_tweet_sequence pipeline using inputs parameter.""" # Execute the pipeline using inputs - pipe_output = await execute_pipeline( + runner = PipelexRunner( library_dirs=["tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/"], + pipe_run_mode=pipe_run_mode, + ) + response = await runner.execute_pipeline( pipe_code="optimize_tweet_sequence", inputs={ "draft_tweet": { @@ -107,8 +110,8 @@ async def test_optimize_tweet_sequence_with_input_memory(self, pipe_run_mode: Pi "content": SAMPLE_WRITING_STYLE, }, }, - pipe_run_mode=pipe_run_mode, ) + pipe_output = response.pipe_output pretty_print(pipe_output, title="Pipe output for optimize_tweet_sequence") # Get the optimized tweet diff --git a/tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/test_tweet.plx b/tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/test_tweet.mthds similarity index 87% rename from tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/test_tweet.plx rename to tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/test_tweet.mthds index 25fe6e43b..79a13f822 100644 --- a/tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/test_tweet.plx +++ b/tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/test_tweet.mthds @@ -1,11 +1,11 @@ -domain = "tech_tweet" +domain = "tech_tweet" description = "A pipeline for optimizing tech tweets using Twitter/X best practices" [concept] -DraftTweet = "A draft version of a tech tweet that needs optimization" +DraftTweet = "A draft version of a tech tweet that needs optimization" OptimizedTweet = "A tweet optimized for Twitter/X engagement following best practices" -TweetAnalysis = "Analysis of the tweet's structure and potential improvements" -WritingStyle = "A style of writing" +TweetAnalysis = "Analysis of the tweet's structure and potential improvements" +WritingStyle = "A style of writing" [pipe] [pipe.analyze_tweet] @@ -76,7 +76,6 @@ description = "Analyze and optimize a tech tweet in sequence" inputs = { draft_tweet = "DraftTweet", writing_style = "WritingStyle" } output = "OptimizedTweet" steps = [ - { pipe = "analyze_tweet", result = "tweet_analysis" }, - { pipe = "optimize_tweet", result = "optimized_tweet" }, + { pipe = "analyze_tweet", result = "tweet_analysis" }, + { pipe = "optimize_tweet", result = "optimized_tweet" }, ] - diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_match.plx b/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_match.mthds similarity index 74% rename from tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_match.plx rename to tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_match.mthds index e87487e43..09828b1d7 100644 --- a/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_match.plx +++ b/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_match.mthds @@ -1,13 +1,13 @@ -domain = "cv_job_matching" +domain = "cv_job_matching" description = "Analyzing CV and job offer compatibility and generating interview 
questions" -main_pipe = "cv_job_matcher" +main_pipe = "cv_job_matcher" [concept] -CVAnalysis = "Structured analysis of a candidate's curriculum vitae highlighting their professional profile." -JobRequirements = "Structured analysis of a job offer detailing what the employer is seeking." -MatchAnalysis = "Evaluation of how well a candidate aligns with job requirements." +CVAnalysis = "Structured analysis of a candidate's curriculum vitae highlighting their professional profile." +JobRequirements = "Structured analysis of a job offer detailing what the employer is seeking." +MatchAnalysis = "Evaluation of how well a candidate aligns with job requirements." InterviewQuestion = "A targeted question designed for a job interview with its underlying purpose." -InterviewSheet = "A comprehensive interview preparation document combining match analysis with targeted interview questions." +InterviewSheet = "A comprehensive interview preparation document combining match analysis with targeted interview questions." [pipe.cv_job_matcher] type = "PipeSequence" @@ -17,11 +17,11 @@ Main pipeline that processes CV and job offer PDFs, analyzes their match, and ge inputs = { cv_pdf = "Document", job_offer_pdf = "Document" } output = "InterviewSheet" steps = [ - { pipe = "extract_documents", result = "extracted_documents" }, - { pipe = "analyze_documents", result = "analyzed_documents" }, - { pipe = "evaluate_match", result = "match_analysis" }, - { pipe = "generate_interview_questions", result = "interview_questions" }, - { pipe = "compose_interview_sheet", result = "interview_sheet" }, + { pipe = "extract_documents", result = "extracted_documents" }, + { pipe = "analyze_documents", result = "analyzed_documents" }, + { pipe = "evaluate_match", result = "match_analysis" }, + { pipe = "generate_interview_questions", result = "interview_questions" }, + { pipe = "compose_interview_sheet", result = "interview_sheet" }, ] [pipe.extract_documents] @@ -29,34 +29,34 @@ type = "PipeParallel" description = "Extracts text content from both the CV and job offer PDFs concurrently" inputs = { cv_pdf = "Document", job_offer_pdf = "Document" } output = "Page[]" -parallels = [ - { pipe = "extract_cv", result = "cv_pages" }, - { pipe = "extract_job_offer", result = "job_offer_pages" }, +branches = [ + { pipe = "extract_cv", result = "cv_pages" }, + { pipe = "extract_job_offer", result = "job_offer_pages" }, ] add_each_output = true [pipe.extract_cv] -type = "PipeExtract" +type = "PipeExtract" description = "Extracts text content from the CV PDF document" -inputs = { cv_pdf = "Document" } -output = "Page[]" -model = "@default-text-from-pdf" +inputs = { cv_pdf = "Document" } +output = "Page[]" +model = "@default-text-from-pdf" [pipe.extract_job_offer] -type = "PipeExtract" +type = "PipeExtract" description = "Extracts text content from the job offer PDF document" -inputs = { job_offer_pdf = "Document" } -output = "Page[]" -model = "@default-text-from-pdf" +inputs = { job_offer_pdf = "Document" } +output = "Page[]" +model = "@default-text-from-pdf" [pipe.analyze_documents] type = "PipeParallel" description = "Analyzes both the CV and job offer documents concurrently to extract structured information" inputs = { cv_pages = "Page", job_offer_pages = "Page" } output = "Text" -parallels = [ - { pipe = "analyze_cv", result = "cv_analysis" }, - { pipe = "analyze_job_offer", result = "job_requirements" }, +branches = [ + { pipe = "analyze_cv", result = "cv_analysis" }, + { pipe = "analyze_job_offer", result = "job_requirements" }, ] 
add_each_output = true @@ -153,10 +153,10 @@ inputs = { match_analysis = "MatchAnalysis", interview_questions = "InterviewQue output = "InterviewSheet" [pipe.compose_interview_sheet.construct] -overall_match_score = { from = "match_analysis.overall_match_score" } -matching_skills = { from = "match_analysis.matching_skills" } -missing_skills = { from = "match_analysis.missing_skills" } +overall_match_score = { from = "match_analysis.overall_match_score" } +matching_skills = { from = "match_analysis.matching_skills" } +missing_skills = { from = "match_analysis.missing_skills" } experience_alignment = { from = "match_analysis.experience_alignment" } -areas_of_concern = { from = "match_analysis.areas_of_concern" } -areas_to_explore = { from = "match_analysis.areas_to_explore" } -questions = { from = "interview_questions" } +areas_of_concern = { from = "match_analysis.areas_of_concern" } +areas_to_explore = { from = "match_analysis.areas_to_explore" } +questions = { from = "interview_questions" } diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_analysis.py b/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_analysis.py index 4b7dae325..588a1e206 100644 --- a/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_analysis.py +++ b/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_analysis.py @@ -2,7 +2,7 @@ If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_itvw_question.py b/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_itvw_question.py index 6e81215b0..131392fdc 100644 --- a/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_itvw_question.py +++ b/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_itvw_question.py @@ -2,7 +2,7 @@ If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_itvw_sheet.py b/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_itvw_sheet.py index cf0de5173..8b959acaf 100644 --- a/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_itvw_sheet.py +++ b/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_itvw_sheet.py @@ -2,7 +2,7 @@ If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. 
Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_job_requirements.py b/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_job_requirements.py index 7fb93ab2d..1e611255a 100644 --- a/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_job_requirements.py +++ b/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_job_requirements.py @@ -2,7 +2,7 @@ If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_match_analysis.py b/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_match_analysis.py index d5aec53fb..735e1280e 100644 --- a/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_match_analysis.py +++ b/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_match_analysis.py @@ -2,7 +2,7 @@ If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. 
Make sure your custom class is importable and registered diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/test_pipe_compose_after_inference.py b/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/test_pipe_compose_after_inference.py index 238fd4968..75771d5b2 100644 --- a/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/test_pipe_compose_after_inference.py +++ b/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/test_pipe_compose_after_inference.py @@ -5,7 +5,7 @@ from pipelex import pretty_print from pipelex.core.stuffs.document_content import DocumentContent from pipelex.pipe_run.pipe_run_mode import PipeRunMode -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner from tests.cases import DocumentTestCases from tests.e2e.pipelex.pipes.pipe_operators.pipe_compose.cv_job_matching_itvw_sheet import InterviewSheet @@ -20,15 +20,18 @@ class TestPipeComposeAfterInference: async def test_pipe_compose_after_inference(self, pipe_run_mode: PipeRunMode): """Test a pipe which uses inference to analyze stuff and then uses PipeCompose to compose a structured content.""" - pipe_output = await execute_pipeline( - pipe_code="cv_job_matcher", + runner = PipelexRunner( library_dirs=["tests/e2e/pipelex/pipes/pipe_operators/pipe_compose"], + pipe_run_mode=pipe_run_mode, + ) + response = await runner.execute_pipeline( + pipe_code="cv_job_matcher", inputs={ "cv_pdf": DocumentContent(url=DocumentTestCases.PDF_FILE_PATH_CV), "job_offer_pdf": DocumentContent(url=DocumentTestCases.PDF_FILE_PATH_2), }, - pipe_run_mode=pipe_run_mode, ) + pipe_output = response.pipe_output # Basic assertions assert pipe_output is not None diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_img_gen/pipe_img_gen.plx b/tests/e2e/pipelex/pipes/pipe_operators/pipe_img_gen/pipe_img_gen.mthds similarity index 60% rename from tests/e2e/pipelex/pipes/pipe_operators/pipe_img_gen/pipe_img_gen.plx rename to tests/e2e/pipelex/pipes/pipe_operators/pipe_img_gen/pipe_img_gen.mthds index 766a1b85b..e5de731a7 100644 --- a/tests/e2e/pipelex/pipes/pipe_operators/pipe_img_gen/pipe_img_gen.plx +++ b/tests/e2e/pipelex/pipes/pipe_operators/pipe_img_gen/pipe_img_gen.mthds @@ -1,13 +1,13 @@ -domain = "pipe_img_gen_e2e" +domain = "pipe_img_gen_e2e" description = "E2E tests for PipeImgGen operator including text-to-image and img2img" # Text-to-Image Pipes [pipe.generate_image_basic_e2e] -type = "PipeImgGen" +type = "PipeImgGen" description = "Generate a single image from a text prompt" -output = "Image" -prompt = "A colorful landscape with mountains and a river at sunset" -model = "$gen-image-testing" +output = "Image" +prompt = "A colorful landscape with mountains and a river at sunset" +model = "$gen-image-testing" # [pipe.generate_image_with_negative_e2e] # type = "PipeImgGen" @@ -18,29 +18,29 @@ model = "$gen-image-testing" # model = "qwen-image" [pipe.generate_image_from_input_e2e] -type = "PipeImgGen" +type = "PipeImgGen" description = "Generate an image from a dynamic text prompt" -inputs = { image_prompt = "Text" } -output = "Image" -prompt = "$image_prompt" -model = "$gen-image-testing" +inputs = { image_prompt = "Text" } +output = "Image" +prompt = "$image_prompt" +model = "$gen-image-testing" # img2img Pipes [pipe.img2img_single_input_e2e] -type = "PipeImgGen" +type = "PipeImgGen" description = "Edit an image based on a text prompt" -inputs = { source_image = "Image" } -output = "Image" -prompt = "Add a colorful sunset sky in the background. 
$source_image" -model = "$gen-image-testing-img2img" +inputs = { source_image = "Image" } +output = "Image" +prompt = "Add a colorful sunset sky in the background. $source_image" +model = "$gen-image-testing-img2img" [pipe.img2img_style_transfer_e2e] -type = "PipeImgGen" +type = "PipeImgGen" description = "Transform an image into a different artistic style" -inputs = { source_image = "Image" } -output = "Image" -prompt = "Transform this image into a watercolor painting style. $source_image" -model = "$gen-image-testing-img2img" +inputs = { source_image = "Image" } +output = "Image" +prompt = "Transform this image into a watercolor painting style. $source_image" +model = "$gen-image-testing-img2img" [pipe.img2img_blend_two_images_e2e] type = "PipeImgGen" diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_img_gen/test_pipe_img_gen.py b/tests/e2e/pipelex/pipes/pipe_operators/pipe_img_gen/test_pipe_img_gen.py index 2c9aec724..51186ad89 100644 --- a/tests/e2e/pipelex/pipes/pipe_operators/pipe_img_gen/test_pipe_img_gen.py +++ b/tests/e2e/pipelex/pipes/pipe_operators/pipe_img_gen/test_pipe_img_gen.py @@ -5,7 +5,7 @@ from pipelex import pretty_print from pipelex.core.stuffs.image_content import ImageContent from pipelex.pipe_run.pipe_run_mode import PipeRunMode -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner from tests.cases import ImageTestCases LIBRARY_DIRS = ["tests/e2e/pipelex/pipes/pipe_operators/pipe_img_gen"] @@ -20,11 +20,14 @@ class TestPipeImgGen: async def test_generate_image_basic(self, pipe_run_mode: PipeRunMode): """Test basic text-to-image generation with a simple prompt.""" - pipe_output = await execute_pipeline( - pipe_code="generate_image_basic_e2e", + runner = PipelexRunner( library_dirs=LIBRARY_DIRS, pipe_run_mode=pipe_run_mode, ) + response = await runner.execute_pipeline( + pipe_code="generate_image_basic_e2e", + ) + pipe_output = response.pipe_output assert pipe_output is not None assert pipe_output.working_memory is not None @@ -38,11 +41,14 @@ async def test_generate_image_basic(self, pipe_run_mode: PipeRunMode): @pytest.mark.xfail(reason="Negative prompt is not supported by most models and when it is, it doesn't work well", strict=False) async def test_generate_image_with_negative_prompt(self, pipe_run_mode: PipeRunMode): """Test text-to-image generation with negative prompt.""" - pipe_output = await execute_pipeline( - pipe_code="generate_image_with_negative_e2e", + runner = PipelexRunner( library_dirs=LIBRARY_DIRS, pipe_run_mode=pipe_run_mode, ) + response = await runner.execute_pipeline( + pipe_code="generate_image_with_negative_e2e", + ) + pipe_output = response.pipe_output assert pipe_output is not None assert pipe_output.working_memory is not None @@ -55,12 +61,15 @@ async def test_generate_image_with_negative_prompt(self, pipe_run_mode: PipeRunM async def test_generate_image_from_text(self, pipe_run_mode: PipeRunMode): """Test image generation with dynamic prompt from input.""" - pipe_output = await execute_pipeline( - pipe_code="generate_image_from_input_e2e", + runner = PipelexRunner( library_dirs=LIBRARY_DIRS, - inputs={"image_prompt": "A serene Japanese garden with cherry blossoms"}, pipe_run_mode=pipe_run_mode, ) + response = await runner.execute_pipeline( + pipe_code="generate_image_from_input_e2e", + inputs={"image_prompt": "A serene Japanese garden with cherry blossoms"}, + ) + pipe_output = response.pipe_output assert pipe_output is not None assert pipe_output.working_memory is not None @@ 
-85,12 +94,15 @@ async def test_img2img_from_single_image( image_uri: str, ): """Test img2img with a single input image.""" - pipe_output = await execute_pipeline( - pipe_code="img2img_single_input_e2e", + runner = PipelexRunner( library_dirs=LIBRARY_DIRS, - inputs={"source_image": ImageContent(url=image_uri)}, pipe_run_mode=pipe_run_mode, ) + response = await runner.execute_pipeline( + pipe_code="img2img_single_input_e2e", + inputs={"source_image": ImageContent(url=image_uri)}, + ) + pipe_output = response.pipe_output assert pipe_output is not None assert pipe_output.working_memory is not None @@ -115,12 +127,15 @@ async def test_img2img_style_transfer( image_uri: str, ): """Test img2img style transfer transformation.""" - pipe_output = await execute_pipeline( - pipe_code="img2img_style_transfer_e2e", + runner = PipelexRunner( library_dirs=LIBRARY_DIRS, - inputs={"source_image": ImageContent(url=image_uri)}, pipe_run_mode=pipe_run_mode, ) + response = await runner.execute_pipeline( + pipe_code="img2img_style_transfer_e2e", + inputs={"source_image": ImageContent(url=image_uri)}, + ) + pipe_output = response.pipe_output assert pipe_output is not None assert pipe_output.working_memory is not None @@ -147,15 +162,18 @@ async def test_img2img_blend_two_images( subject_image_uri: str, ): """Test blending two images: style from one and subject from the other.""" - pipe_output = await execute_pipeline( - pipe_code="img2img_blend_two_images_e2e", + runner = PipelexRunner( library_dirs=LIBRARY_DIRS, + pipe_run_mode=pipe_run_mode, + ) + response = await runner.execute_pipeline( + pipe_code="img2img_blend_two_images_e2e", inputs={ "style_image": ImageContent(url=style_image_uri), "subject_image": ImageContent(url=subject_image_uri), }, - pipe_run_mode=pipe_run_mode, ) + pipe_output = response.pipe_output assert pipe_output is not None assert pipe_output.working_memory is not None diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_document_inputs.plx b/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_document_inputs.mthds similarity index 90% rename from tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_document_inputs.plx rename to tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_document_inputs.mthds index 16f02b8b7..90905fe46 100644 --- a/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_document_inputs.plx +++ b/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_document_inputs.mthds @@ -1,10 +1,10 @@ -domain = "pipe_llm_document_inputs_e2e" +domain = "pipe_llm_document_inputs_e2e" description = "E2E tests for document input handling in PipeLLM" [concept] -DocumentSummaryE2E = "Summary of a document" +DocumentSummaryE2E = "Summary of a document" DocumentListAnalysisE2E = "Analysis of multiple documents" -MixedMediaAnalysisE2E = "Analysis of documents and images together" +MixedMediaAnalysisE2E = "Analysis of documents and images together" # Scenario 1: Direct document [pipe.summarize_single_document_e2e] diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_filename_html.plx b/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_filename_html.mthds similarity index 76% rename from tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_filename_html.plx rename to tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_filename_html.mthds index 109ac6796..9a4787f77 100644 --- a/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_filename_html.plx +++ 
b/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_filename_html.mthds @@ -1,4 +1,4 @@ -domain = "pipe_llm_filename_html_e2e" +domain = "pipe_llm_filename_html_e2e" description = "E2E tests for filename in PipeCompose HTML template" [concept] @@ -10,8 +10,8 @@ description = "Describe inputs then compose HTML with filenames" inputs = { image = "Image", document = "Document" } output = "Html" steps = [ - { pipe = "describe_inputs_e2e", result = "descriptions" }, - { pipe = "compose_filename_html_e2e", result = "filename_html" }, + { pipe = "describe_inputs_e2e", result = "descriptions" }, + { pipe = "compose_filename_html_e2e", result = "filename_html" }, ] [pipe.describe_inputs_e2e] @@ -28,10 +28,10 @@ Document: $document """ [pipe.compose_filename_html_e2e] -type = "PipeCompose" +type = "PipeCompose" description = "Compose HTML with filenames and descriptions" -inputs = { image = "Image", document = "Document", descriptions = "InputDescriptionsE2E" } -output = "Html" +inputs = { image = "Image", document = "Document", descriptions = "InputDescriptionsE2E" } +output = "Html" [pipe.compose_filename_html_e2e.template] category = "html" diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_image_inputs.plx b/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_image_inputs.mthds similarity index 94% rename from tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_image_inputs.plx rename to tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_image_inputs.mthds index e17d228ee..47e7ec136 100644 --- a/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_image_inputs.plx +++ b/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_image_inputs.mthds @@ -1,10 +1,10 @@ -domain = "pipe_llm_image_inputs_e2e" +domain = "pipe_llm_image_inputs_e2e" description = "E2E tests for image input handling in PipeLLM" [concept] -ImageDescriptionE2E = "Description of an image" +ImageDescriptionE2E = "Description of an image" ImageListAnalysisE2E = "Analysis of multiple images" -PageDescriptionE2E = "Description of a page" +PageDescriptionE2E = "Description of a page" # Scenario 1: Direct image [pipe.describe_single_image_e2e] diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_vision.plx b/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_vision.mthds similarity index 92% rename from tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_vision.plx rename to tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_vision.mthds index 9af66d8ac..caf2ba7d1 100644 --- a/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_vision.plx +++ b/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_vision.mthds @@ -1,13 +1,13 @@ -domain = "pipe_llm_vision_e2e" +domain = "pipe_llm_vision_e2e" description = "Test PipeLLM with vision capabilities" [concept] -VisionAnalysisE2E = "Some analysis based on the image" +VisionAnalysisE2E = "Some analysis based on the image" BasicDescriptionE2E = "Basic description of the image" [concept.PhotoE2E] description = "A photo" -refines = "Image" +refines = "Image" [pipe.describe_image_e2e] type = "PipeLLM" @@ -54,4 +54,3 @@ prompt = """ Analyze this image and describe what's the main topic (be concise). 
$image """ - diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/test_pipe_llm_document_inputs.py b/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/test_pipe_llm_document_inputs.py index 88d099b32..0b9fd5c3f 100644 --- a/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/test_pipe_llm_document_inputs.py +++ b/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/test_pipe_llm_document_inputs.py @@ -7,7 +7,7 @@ from pipelex.core.stuffs.image_content import ImageContent from pipelex.core.stuffs.list_content import ListContent from pipelex.pipe_run.pipe_run_mode import PipeRunMode -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner from tests.cases.documents import DocumentTestCases from tests.e2e.pipelex.pipes.pipe_operators.pipe_llm.pipe_llm_document_inputs import ( DocumentListAnalysisE2E, @@ -28,36 +28,32 @@ class TestDocumentInputsE2E: async def test_direct_single_document(self, pipe_run_mode: PipeRunMode) -> None: """Test single direct document input with local PDF file.""" - pipe_output = await execute_pipeline( + pipeline_response = await PipelexRunner(library_dirs=LIBRARY_DIRS, pipe_run_mode=pipe_run_mode).execute_pipeline( pipe_code="summarize_single_document_e2e", - library_dirs=LIBRARY_DIRS, inputs={ "document": DocumentContent(url=DocumentTestCases.PDF_FILE_PATH_2), }, - pipe_run_mode=pipe_run_mode, ) - assert pipe_output.main_stuff is not None + assert pipeline_response.pipe_output.main_stuff is not None if pipe_run_mode.is_live: - result = pipe_output.main_stuff_as(content_type=DocumentSummaryE2E) + result = pipeline_response.pipe_output.main_stuff_as(content_type=DocumentSummaryE2E) pretty_print(result, title="Direct Document Summary") assert len(result.summary) > 10 assert result.document_type.lower() in {"job offer", "job", "offer", "employment", "contract"} async def test_direct_single_document_by_url(self, pipe_run_mode: PipeRunMode) -> None: """Test single direct document input with remote URL.""" - pipe_output = await execute_pipeline( + pipeline_response = await PipelexRunner(library_dirs=LIBRARY_DIRS, pipe_run_mode=pipe_run_mode).execute_pipeline( pipe_code="summarize_single_document_e2e", - library_dirs=LIBRARY_DIRS, inputs={ "document": DocumentContent(url=DocumentTestCases.PDF_FILE_URL_1), }, - pipe_run_mode=pipe_run_mode, ) - assert pipe_output.main_stuff is not None + assert pipeline_response.pipe_output.main_stuff is not None if pipe_run_mode.is_live: - result = pipe_output.main_stuff_as(content_type=DocumentSummaryE2E) + result = pipeline_response.pipe_output.main_stuff_as(content_type=DocumentSummaryE2E) pretty_print(result, title="Document Summary (URL)") assert len(result.summary) > 10 @@ -70,16 +66,14 @@ async def test_document_list_input(self, pipe_run_mode: PipeRunMode) -> None: ] ) - pipe_output = await execute_pipeline( + pipeline_response = await PipelexRunner(library_dirs=LIBRARY_DIRS, pipe_run_mode=pipe_run_mode).execute_pipeline( pipe_code="analyze_document_list_e2e", - library_dirs=LIBRARY_DIRS, inputs={"documents": documents}, - pipe_run_mode=pipe_run_mode, ) - assert pipe_output.main_stuff is not None + assert pipeline_response.pipe_output.main_stuff is not None if pipe_run_mode.is_live: - analysis = pipe_output.main_stuff_as(content_type=DocumentListAnalysisE2E) + analysis = pipeline_response.pipe_output.main_stuff_as(content_type=DocumentListAnalysisE2E) pretty_print(analysis, title="Document List Analysis") assert analysis.document_count == 2 @@ -88,38 +82,34 @@ async def 
test_compare_document_lists(self, pipe_run_mode: PipeRunMode) -> None: collection_a = ListContent[DocumentContent](items=[DocumentContent(url=DocumentTestCases.PDF_FILE_PATH_2)]) collection_b = ListContent[DocumentContent](items=[DocumentContent(url=DocumentTestCases.PDF_FILE_PATH_3)]) - pipe_output = await execute_pipeline( + pipeline_response = await PipelexRunner(library_dirs=LIBRARY_DIRS, pipe_run_mode=pipe_run_mode).execute_pipeline( pipe_code="compare_document_lists_e2e", - library_dirs=LIBRARY_DIRS, inputs={ "collection_a": collection_a, "collection_b": collection_b, }, - pipe_run_mode=pipe_run_mode, ) - assert pipe_output.main_stuff is not None + assert pipeline_response.pipe_output.main_stuff is not None if pipe_run_mode.is_live: - analysis = pipe_output.main_stuff_as(content_type=DocumentListAnalysisE2E) + analysis = pipeline_response.pipe_output.main_stuff_as(content_type=DocumentListAnalysisE2E) pretty_print(analysis, title="Document Lists Comparison") # Should count both collections assert analysis.document_count == 2 async def test_mixed_document_and_image_inputs(self, pipe_run_mode: PipeRunMode) -> None: """Test combining document with image input.""" - pipe_output = await execute_pipeline( + pipeline_response = await PipelexRunner(library_dirs=LIBRARY_DIRS, pipe_run_mode=pipe_run_mode).execute_pipeline( pipe_code="mixed_document_image_inputs_e2e", - library_dirs=LIBRARY_DIRS, inputs={ "document": DocumentContent(url=DocumentTestCases.PDF_FILE_PATH_2), "image": ImageContent(url=LLMVisionTestCases.URL_CLOUDFRONT_ALAN_TURING_JPG), }, - pipe_run_mode=pipe_run_mode, ) - assert pipe_output.main_stuff is not None + assert pipeline_response.pipe_output.main_stuff is not None if pipe_run_mode.is_live: - result = pipe_output.main_stuff_as(content_type=MixedMediaAnalysisE2E) + result = pipeline_response.pipe_output.main_stuff_as(content_type=MixedMediaAnalysisE2E) pretty_print(result, title="Mixed Document + Image Analysis") assert result.can_see_both is True assert len(result.document_summary) > 10 diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/test_pipe_llm_filename_html.py b/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/test_pipe_llm_filename_html.py index 2293917d0..b7f81f620 100644 --- a/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/test_pipe_llm_filename_html.py +++ b/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/test_pipe_llm_filename_html.py @@ -5,7 +5,7 @@ from pipelex.core.stuffs.document_content import DocumentContent from pipelex.core.stuffs.image_content import ImageContent from pipelex.pipe_run.pipe_run_mode import PipeRunMode -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner from pipelex.urls import URLs from tests.cases.documents import DocumentTestCases from tests.cases.images import ImageTestCases @@ -33,15 +33,18 @@ async def test_filename_in_compose_html(self, pipe_run_mode: PipeRunMode) -> Non assert ImageContent(url=URLs.png_example_1).filename is None assert DocumentContent(url=URLs.pdf_example_1).filename is None - pipe_output = await execute_pipeline( - pipe_code="describe_with_filenames_e2e", + runner = PipelexRunner( library_dirs=LIBRARY_DIRS, + pipe_run_mode=pipe_run_mode, + ) + response = await runner.execute_pipeline( + pipe_code="describe_with_filenames_e2e", inputs={ "image": image_content, "document": document_content, }, - pipe_run_mode=pipe_run_mode, ) + pipe_output = response.pipe_output assert pipe_output.main_stuff is not None if pipe_run_mode.is_live: diff --git 
a/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/test_pipe_llm_image_inputs.py b/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/test_pipe_llm_image_inputs.py index 38702b979..7eb6ff6b0 100644 --- a/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/test_pipe_llm_image_inputs.py +++ b/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/test_pipe_llm_image_inputs.py @@ -9,7 +9,7 @@ from pipelex.core.stuffs.text_and_images_content import TextAndImagesContent from pipelex.core.stuffs.text_content import TextContent from pipelex.pipe_run.pipe_run_mode import PipeRunMode -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner from tests.cases import ImageTestCases from tests.e2e.pipelex.pipes.pipe_operators.pipe_llm.pipe_llm_image_inputs import ( ImageDescriptionE2E, @@ -30,14 +30,17 @@ class TestImageInputsE2E: async def test_direct_single_image(self, pipe_run_mode: PipeRunMode) -> None: """Test single direct image input.""" - pipe_output = await execute_pipeline( - pipe_code="describe_single_image_e2e", + runner = PipelexRunner( library_dirs=LIBRARY_DIRS, + pipe_run_mode=pipe_run_mode, + ) + response = await runner.execute_pipeline( + pipe_code="describe_single_image_e2e", inputs={ "image": ImageContent(url=LLMVisionTestCases.URL_CLOUDFRONT_ALAN_TURING_JPG), }, - pipe_run_mode=pipe_run_mode, ) + pipe_output = response.pipe_output assert pipe_output.main_stuff is not None if pipe_run_mode.is_live: @@ -54,12 +57,15 @@ async def test_image_list_input(self, pipe_run_mode: PipeRunMode) -> None: ] ) - pipe_output = await execute_pipeline( - pipe_code="analyze_image_list_e2e", + runner = PipelexRunner( library_dirs=LIBRARY_DIRS, - inputs={"images": images}, pipe_run_mode=pipe_run_mode, ) + response = await runner.execute_pipeline( + pipe_code="analyze_image_list_e2e", + inputs={"images": images}, + ) + pipe_output = response.pipe_output assert pipe_output.main_stuff is not None if pipe_run_mode.is_live: @@ -72,15 +78,18 @@ async def test_compare_image_lists(self, pipe_run_mode: PipeRunMode) -> None: collection_a = ListContent[ImageContent](items=[ImageContent(url=LLMVisionTestCases.URL_CLOUDFRONT_ALAN_TURING_JPG)]) collection_b = ListContent[ImageContent](items=[ImageContent(url=ImageTestCases.LOGO_TINY_PNG_DATA_URL)]) - pipe_output = await execute_pipeline( - pipe_code="compare_image_lists_e2e", + runner = PipelexRunner( library_dirs=LIBRARY_DIRS, + pipe_run_mode=pipe_run_mode, + ) + response = await runner.execute_pipeline( + pipe_code="compare_image_lists_e2e", inputs={ "collection_a": collection_a, "collection_b": collection_b, }, - pipe_run_mode=pipe_run_mode, ) + pipe_output = response.pipe_output assert pipe_output.main_stuff is not None if pipe_run_mode.is_live: @@ -100,12 +109,15 @@ async def test_page_with_images_filter_extracts_images(self, pipe_run_mode: Pipe page_view=ImageContent(url=ImageTestCases.LOGO_TINY_PNG_DATA_URL), ) - pipe_output = await execute_pipeline( - pipe_code="describe_page_with_images_e2e", + runner = PipelexRunner( library_dirs=LIBRARY_DIRS, - inputs={"page": page_content}, pipe_run_mode=pipe_run_mode, ) + response = await runner.execute_pipeline( + pipe_code="describe_page_with_images_e2e", + inputs={"page": page_content}, + ) + pipe_output = response.pipe_output assert pipe_output.main_stuff is not None if pipe_run_mode.is_live: @@ -126,12 +138,15 @@ async def test_page_without_filter_no_images_sent(self, pipe_run_mode: PipeRunMo page_view=ImageContent(url=ImageTestCases.LOGO_TINY_PNG_DATA_URL), ) - pipe_output = await 
execute_pipeline( - pipe_code="describe_page_text_only_e2e", + runner = PipelexRunner( library_dirs=LIBRARY_DIRS, - inputs={"page": page_content}, pipe_run_mode=pipe_run_mode, ) + response = await runner.execute_pipeline( + pipe_code="describe_page_text_only_e2e", + inputs={"page": page_content}, + ) + pipe_output = response.pipe_output assert pipe_output.main_stuff is not None if pipe_run_mode.is_live: @@ -150,15 +165,18 @@ async def test_mixed_direct_and_nested_images(self, pipe_run_mode: PipeRunMode) page_view=ImageContent(url=ImageTestCases.LOGO_TINY_PNG_DATA_URL), ) - pipe_output = await execute_pipeline( - pipe_code="mixed_image_inputs_e2e", + runner = PipelexRunner( library_dirs=LIBRARY_DIRS, + pipe_run_mode=pipe_run_mode, + ) + response = await runner.execute_pipeline( + pipe_code="mixed_image_inputs_e2e", inputs={ "direct_image": ImageContent(url=LLMVisionTestCases.URL_CLOUDFRONT_ALAN_TURING_JPG), "page": page_content, }, - pipe_run_mode=pipe_run_mode, ) + pipe_output = response.pipe_output assert pipe_output.main_stuff is not None if pipe_run_mode.is_live: diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/test_pipe_llm_vision.py b/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/test_pipe_llm_vision.py index 7216162a8..6847f348a 100644 --- a/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/test_pipe_llm_vision.py +++ b/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/test_pipe_llm_vision.py @@ -5,7 +5,7 @@ from pipelex import pretty_print, pretty_print_md from pipelex.core.stuffs.image_content import ImageContent from pipelex.pipe_run.pipe_run_mode import PipeRunMode -from pipelex.pipeline.execute import execute_pipeline +from pipelex.pipeline.runner import PipelexRunner from tests.e2e.pipelex.pipes.pipe_operators.pipe_llm.pipe_llm_vision import VisionAnalysisE2E from tests.integration.pipelex.cogt.test_data import LLMVisionTestCases from tests.integration.pipelex.test_data import PipeTestCases @@ -18,22 +18,22 @@ class TestPipeLLMVision: async def test_describe_image_single(self, pipe_run_mode: PipeRunMode): # Execute the pipeline with an image - pipe_output = await execute_pipeline( + pipeline_response = await PipelexRunner( + library_dirs=["tests/e2e/pipelex/pipes/pipe_operators"], pipe_run_mode=pipe_run_mode + ).execute_pipeline( pipe_code="describe_image_e2e", - library_dirs=["tests/e2e/pipelex/pipes/pipe_operators"], inputs={ "image": ImageContent(url=LLMVisionTestCases.URL_CLOUDFRONT_ALAN_TURING_JPG), }, - pipe_run_mode=pipe_run_mode, ) # Get the result as text - result_text = pipe_output.main_stuff_as_str + result_text = pipeline_response.pipe_output.main_stuff_as_str # Basic assertions - assert pipe_output is not None - assert pipe_output.working_memory is not None - assert pipe_output.main_stuff is not None + assert pipeline_response.pipe_output is not None + assert pipeline_response.pipe_output.working_memory is not None + assert pipeline_response.pipe_output.main_stuff is not None assert result_text is not None assert len(result_text) > 0 @@ -53,33 +53,33 @@ async def test_describe_image_single(self, pipe_run_mode: PipeRunMode): async def test_describe_images_multiple(self, pipe_run_mode: PipeRunMode, pipe_code: str): """Test the describe_image pipeline with multiple images to discriminate.""" # Execute the pipeline with an image - pipe_output = await execute_pipeline( + pipeline_response = await PipelexRunner( + library_dirs=["tests/e2e/pipelex/pipes/pipe_operators"], pipe_run_mode=pipe_run_mode + ).execute_pipeline( pipe_code=pipe_code, - 
library_dirs=["tests/e2e/pipelex/pipes/pipe_operators"], inputs={ "image_a": ImageContent(url=LLMVisionTestCases.URL_CLOUDFRONT_ALAN_TURING_JPG), "image_b": ImageContent(url=PipeTestCases.URL_IMG_FASHION_PHOTO_1), }, - pipe_run_mode=pipe_run_mode, ) - description = pipe_output.main_stuff_as_str + description = pipeline_response.pipe_output.main_stuff_as_str pretty_print_md(description, title=f"Image Description ({pipe_code})") async def test_structured_analysis_of_image_with_gantt_chart(self, pipe_run_mode: PipeRunMode): """Test vision with a more complex image (Gantt chart).""" # Execute the pipeline with a complex image - pipe_output = await execute_pipeline( + pipeline_response = await PipelexRunner( + library_dirs=["tests/e2e/pipelex/pipes/pipe_operators"], pipe_run_mode=pipe_run_mode + ).execute_pipeline( pipe_code="vision_analysis_e2e", - library_dirs=["tests/e2e/pipelex/pipes/pipe_operators"], inputs={ "image": ImageContent(url=PipeTestCases.URL_IMG_GANTT_PNG), }, - pipe_run_mode=pipe_run_mode, ) # Get the result as text - result = pipe_output.main_stuff_as(content_type=VisionAnalysisE2E) + result = pipeline_response.pipe_output.main_stuff_as(content_type=VisionAnalysisE2E) # Log output pretty_print(result, title="Gantt Chart Description") diff --git a/tests/helpers/init_cmd_helpers.py b/tests/helpers/init_cmd_helpers.py index d6e677209..d23af1c23 100644 --- a/tests/helpers/init_cmd_helpers.py +++ b/tests/helpers/init_cmd_helpers.py @@ -181,6 +181,13 @@ def mock_config_manager_paths(self) -> None: """Mock config_manager to use tmp_path.""" self.mock_config_manager = self.mocker.MagicMock() self.mock_config_manager.pipelex_config_dir = str(self.pipelex_dir) + self.mock_config_manager.global_config_dir = str(self.pipelex_dir) + self.mock_config_manager.project_config_dir = str(self.pipelex_dir) + self.mock_config_manager.project_root = str(self.tmp_path) + self.mock_config_manager.backends_file_path = str(self.inference_dir / "backends.toml") + self.mock_config_manager.backends_dir_path = str(self.inference_dir / "backends") + self.mock_config_manager.routing_profiles_file_path = str(self.inference_dir / "routing_profiles.toml") + self.mock_config_manager.model_decks_dir_path = str(self.inference_dir / "deck") # Patch all locations where config_manager is used self.mocker.patch("pipelex.cli.commands.init.command.config_manager", self.mock_config_manager) diff --git a/tests/integration/pipelex/builder/test_builder_plx_validation.py b/tests/integration/pipelex/builder/test_builder_mthds_validation.py similarity index 78% rename from tests/integration/pipelex/builder/test_builder_plx_validation.py rename to tests/integration/pipelex/builder/test_builder_mthds_validation.py index 68be4481c..acf085cba 100644 --- a/tests/integration/pipelex/builder/test_builder_plx_validation.py +++ b/tests/integration/pipelex/builder/test_builder_mthds_validation.py @@ -1,6 +1,6 @@ -"""Tests for validating builder domain PLX files. +"""Tests for validating builder domain MTHDS files. -This module tests that builder.plx and agentic_builder.plx are valid and that +This module tests that builder.mthds and agentic_builder.mthds are valid and that input/output types are correctly declared, especially for pipes that receive batched outputs (lists) from previous steps. 
""" @@ -18,21 +18,21 @@ class TestData: - """Test data for builder PLX validation tests.""" + """Test data for builder MTHDS validation tests.""" - BUILDER_PLX_PATH: ClassVar[Path] = BUILDER_DIR / "builder.plx" - AGENTIC_BUILDER_PLX_PATH: ClassVar[Path] = BUILDER_DIR / "agentic_builder.plx" - PIPE_DESIGN_PLX_PATH: ClassVar[Path] = BUILDER_DIR / "pipe" / "pipe_design.plx" + BUILDER_MTHDS_PATH: ClassVar[Path] = BUILDER_DIR / "builder.mthds" + AGENTIC_BUILDER_MTHDS_PATH: ClassVar[Path] = BUILDER_DIR / "agentic_builder.mthds" + PIPE_DESIGN_MTHDS_PATH: ClassVar[Path] = BUILDER_DIR / "pipe" / "pipe_design.mthds" -class TestBuilderPlxValidation: - """Tests that builder domain PLX files are valid and type-consistent.""" +class TestBuilderMthdsValidation: + """Tests that builder domain MTHDS files are valid and type-consistent.""" @pytest.mark.asyncio(loop_scope="class") - async def test_builder_plx_loads_and_validates(self): - """Test that builder.plx can be loaded and validated successfully.""" + async def test_builder_mthds_loads_and_validates(self): + """Test that builder.mthds can be loaded and validated successfully.""" result = await validate_bundle( - plx_file_path=TestData.BUILDER_PLX_PATH, + mthds_file_path=TestData.BUILDER_MTHDS_PATH, library_dirs=[BUILDER_DIR, BUILDER_DIR / "pipe"], ) @@ -42,10 +42,10 @@ async def test_builder_plx_loads_and_validates(self): assert len(result.pipes) > 0 @pytest.mark.asyncio(loop_scope="class") - async def test_agentic_builder_plx_loads_and_validates(self): - """Test that agentic_builder.plx can be loaded and validated successfully.""" + async def test_agentic_builder_mthds_loads_and_validates(self): + """Test that agentic_builder.mthds can be loaded and validated successfully.""" result = await validate_bundle( - plx_file_path=TestData.AGENTIC_BUILDER_PLX_PATH, + mthds_file_path=TestData.AGENTIC_BUILDER_MTHDS_PATH, library_dirs=[BUILDER_DIR, BUILDER_DIR / "pipe"], ) @@ -55,10 +55,10 @@ async def test_agentic_builder_plx_loads_and_validates(self): assert len(result.pipes) > 0 @pytest.mark.asyncio(loop_scope="class") - async def test_pipe_design_plx_loads_and_validates(self): - """Test that pipe_design.plx can be loaded and validated successfully.""" + async def test_pipe_design_mthds_loads_and_validates(self): + """Test that pipe_design.mthds can be loaded and validated successfully.""" result = await validate_bundle( - plx_file_path=TestData.PIPE_DESIGN_PLX_PATH, + mthds_file_path=TestData.PIPE_DESIGN_MTHDS_PATH, library_dirs=[BUILDER_DIR, BUILDER_DIR / "pipe"], ) @@ -68,15 +68,15 @@ async def test_pipe_design_plx_loads_and_validates(self): assert len(result.pipes) > 0 def test_assemble_pipelex_bundle_spec_has_list_inputs_in_builder(self): - """Test that assemble_pipelex_bundle_spec declares list inputs correctly in builder.plx. + """Test that assemble_pipelex_bundle_spec declares list inputs correctly in builder.mthds. This test catches the bug where pipe_specs was incorrectly declared as "pipe_design.PipeSpec" instead of "pipe_design.PipeSpec[]" when the pipe receives the output of a batch_over operation which produces a list. 
- See: builder.plx line 31 (batch_over produces list) and line 332 (input declaration) + See: builder.mthds line 31 (batch_over produces list) and line 332 (input declaration) """ - blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=TestData.BUILDER_PLX_PATH) + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=TestData.BUILDER_MTHDS_PATH) assert blueprint.pipe is not None assert "assemble_pipelex_bundle_spec" in blueprint.pipe @@ -95,12 +95,12 @@ def test_assemble_pipelex_bundle_spec_has_list_inputs_in_builder(self): assert "[]" in concept_specs_input, f"concept_specs must be declared as a list (with []). Got: {concept_specs_input}" def test_detail_all_pipe_specs_outputs_list_in_agentic_builder(self): - """Test that detail_all_pipe_specs declares list output in agentic_builder.plx. + """Test that detail_all_pipe_specs declares list output in agentic_builder.mthds. This test verifies that the PipeBatch that generates pipe_specs correctly declares its output as a list, which is then consumed by assemble_pipelex_bundle_spec. """ - blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=TestData.AGENTIC_BUILDER_PLX_PATH) + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=TestData.AGENTIC_BUILDER_MTHDS_PATH) assert blueprint.pipe is not None assert "detail_all_pipe_specs" in blueprint.pipe @@ -114,10 +114,10 @@ def test_detail_all_pipe_specs_outputs_list_in_agentic_builder(self): def test_batch_over_result_consistency_with_subsequent_inputs(self): """Test that batch_over results are consumed by pipes with matching list inputs. - In builder.plx, pipe_builder uses batch_over on detail_pipe_spec to produce pipe_specs. + In builder.mthds, pipe_builder uses batch_over on detail_pipe_spec to produce pipe_specs. The subsequent assemble_pipelex_bundle_spec must declare pipe_specs as a list input. 
""" - blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=TestData.BUILDER_PLX_PATH) + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=TestData.BUILDER_MTHDS_PATH) assert blueprint.pipe is not None diff --git a/tests/integration/pipelex/client/test_client.py b/tests/integration/pipelex/client/test_client.py deleted file mode 100644 index 110cfaa30..000000000 --- a/tests/integration/pipelex/client/test_client.py +++ /dev/null @@ -1,132 +0,0 @@ -import pytest -from pydantic import BaseModel - -from pipelex import pretty_print -from pipelex.client.client import PipelexClient -from pipelex.client.protocol import PipelineState -from pipelex.core.concepts.concept_factory import ConceptFactory -from pipelex.core.concepts.native.concept_native import NativeConceptCode -from pipelex.core.memory.working_memory_factory import WorkingMemoryFactory -from pipelex.core.stuffs.stuff import Stuff -from pipelex.core.stuffs.stuff_factory import StuffFactory -from pipelex.core.stuffs.text_content import TextContent - - -class Example(BaseModel): - pipe_code: str - memory: list[Stuff] - - -@pytest.mark.pipelex_api -@pytest.mark.asyncio(loop_scope="class") -class TestPipelexApiClient: - @pytest.fixture - def examples(self) -> list[Example]: - """Fixture providing test example for API client tests.""" - return [ - Example( - pipe_code="retrieve_excerpts", - memory=[ - StuffFactory.make_stuff( - concept=ConceptFactory.make_native_concept(native_concept_code=NativeConceptCode.TEXT), - name="text", - content=TextContent( - text=""" - The Dawn of Ultra-Rapid Transit: NextGen High-Speed Trains Redefine Travel - By Eliza Montgomery, Transportation Technology Reporter - - In an era where time is increasingly precious, a revolution in rail transportation is quietly - transforming how we connect cities and regions. The emergence of ultra-high-speed train - networks, capable of speeds exceeding 350 mph, promises to render certain short-haul - flights obsolete while dramatically reducing carbon emissions. - - QuantumRail's Breakthrough Technology - Leading this transportation revolution is QuantumRail Technologies, whose new MagLev-X - platform has shattered previous speed records during recent tests in Nevada's - Velocity Valley testinggrounds. The train achieved a remarkable 368 mph, - maintaining this speed for over fifteen minutes. - - 'What we're seeing isn't just an incremental improvementβ€”it's a fundamental shift - in transportationphysics,' explains Dr. Hiroshi Takahashi, Chief Engineer at - QuantumRail. 'The MagLev-X's superconducting magnets and aerodynamic profile - allow us to overcome limitations that have constrained train speeds for decades.' - - Economic Implications - The introduction of these next-generation trains isn't merely a technical - achievementβ€”it represents a potential economic windfall for connected regions. - The TransContinental Alliance, a consortium of cities supporting high-speed rail - development, estimates that new high-speed corridors could generate $87 - billion in economic activity over the next decade. - - 'When you can travel between Chicago and Detroit in under an hour, - you're essentially creating a single economic zone, notes Dr. Amara Washington, - economist at the Urban Mobility Institute. This transforms labor markets, housing - patterns, and business relationships. 
- - WindStream's Competitive Response - Not to be outdone, European manufacturer WindStream Mobility has unveiled - its own ultra-high-speed platform, the AeroGlide TGV-7. Featuring a - distinctive bionic design inspired by peregrine falcons, the train uses an innovative - hybrid propulsion system that combines traditional electric motors with - compressed air boosters for acceleration phases. - """, - ), - ), - StuffFactory.make_stuff( - concept=ConceptFactory.make( - concept_code="Question", - domain_code="answer", - description="answer.Question", - structure_class_name="Question", - ), - name="question", - content=TextContent(text="Aerodynamic features?"), - ), - ], - ), - ] - - async def test_client_execute_pipeline( - self, - examples: list[Example], - ): - """Test the execute_pipe method with the example. - - Args: - examples: List of test examples from the fixture - - """ - for example in examples: - # Create working memory from example data - question = example.memory[1] - text = example.memory[0] - memory = WorkingMemoryFactory.make_from_multiple_stuffs(stuff_list=[question, text], main_name=text.stuff_name or text.concept.code) - - # Execute pipe - client = PipelexClient() - pipeline_reponse = await client.execute_pipeline( - pipe_code=example.pipe_code, - inputs=memory, - ) - - pretty_print(pipeline_reponse, title="PIPELINE RESPONSE") - # Verify result - assert pipeline_reponse.pipe_output - assert pipeline_reponse.pipe_output.pipeline_run_id is not None - assert pipeline_reponse.pipeline_state == PipelineState.COMPLETED - assert pipeline_reponse.pipe_output is not None - - working_memory = pipeline_reponse.pipe_output.working_memory - pretty_print(working_memory, title="WORKING MEMORY") - - # Verify question structure - assert working_memory.root["question"].content["text"] == "Aerodynamic features?" 
- - # Verify main_stuff structure - assert working_memory.root["main_stuff"] is not None - assert working_memory.root["main_stuff"].concept == "retrieve.RetrievedExcerpt" - assert working_memory.root["main_stuff"].content is not None - assert len(working_memory.root["main_stuff"].content) > 0 - - # Verify text structure - assert "The Dawn of Ultra-Rapid Transit" in working_memory.root["text"].content["text"] diff --git a/tests/integration/pipelex/concepts/out_of_order_refines/multi_file/base_domain.plx b/tests/integration/pipelex/concepts/out_of_order_refines/multi_file/base_domain.mthds similarity index 76% rename from tests/integration/pipelex/concepts/out_of_order_refines/multi_file/base_domain.plx rename to tests/integration/pipelex/concepts/out_of_order_refines/multi_file/base_domain.mthds index d3c652a73..fa1df60e1 100644 --- a/tests/integration/pipelex/concepts/out_of_order_refines/multi_file/base_domain.plx +++ b/tests/integration/pipelex/concepts/out_of_order_refines/multi_file/base_domain.mthds @@ -1,4 +1,4 @@ -domain = "base_domain" +domain = "base_domain" description = "Base domain with root Person concept" # Root concept with structure - this is the base of the inheritance chain @@ -7,13 +7,13 @@ description = "A person with basic information" [concept.Person.structure] first_name = { type = "text", required = true, description = "First name" } -last_name = { type = "text", required = true, description = "Last name" } +last_name = { type = "text", required = true, description = "Last name" } # Concepts refining native concepts in the base domain [concept.Biography] description = "A biographical text about a person" -refines = "Text" +refines = "Text" [concept.Portrait] description = "A portrait image of a person" -refines = "native.Image" +refines = "native.Image" diff --git a/tests/integration/pipelex/concepts/out_of_order_refines/multi_file/middle_domain.plx b/tests/integration/pipelex/concepts/out_of_order_refines/multi_file/middle_domain.mthds similarity index 84% rename from tests/integration/pipelex/concepts/out_of_order_refines/multi_file/middle_domain.plx rename to tests/integration/pipelex/concepts/out_of_order_refines/multi_file/middle_domain.mthds index 82b32ef8e..1b4192e01 100644 --- a/tests/integration/pipelex/concepts/out_of_order_refines/multi_file/middle_domain.plx +++ b/tests/integration/pipelex/concepts/out_of_order_refines/multi_file/middle_domain.mthds @@ -1,4 +1,4 @@ -domain = "middle_domain" +domain = "middle_domain" description = "Middle domain with chain of refinements - INTENTIONALLY OUT OF ORDER" # INTENTIONALLY DEFINED OUT OF ORDER: @@ -8,31 +8,31 @@ description = "Middle domain with chain of refinements - INTENTIONALLY OUT OF OR # Level 3: PlatinumCustomer refines VIPCustomer (defined FIRST, but VIPCustomer not yet defined) [concept.PlatinumCustomer] description = "A platinum customer with highest privileges" -refines = "middle_domain.VIPCustomer" +refines = "middle_domain.VIPCustomer" # Level 2: VIPCustomer refines Customer (defined SECOND, but Customer not yet defined) [concept.VIPCustomer] description = "A VIP customer with special privileges" -refines = "middle_domain.Customer" +refines = "middle_domain.Customer" # Level 1: Customer refines Person from base_domain (defined THIRD) # This one should work IF base_domain is loaded first [concept.Customer] description = "A customer" -refines = "base_domain.Person" +refines = "base_domain.Person" # Native concept refinements mixed in - also out of order! 
# UrgentNotification refines WelcomeMessage but is defined FIRST [concept.UrgentNotification] description = "An urgent notification for platinum customers" -refines = "middle_domain.WelcomeMessage" +refines = "middle_domain.WelcomeMessage" # WelcomeMessage refines Text (native) - defined AFTER UrgentNotification [concept.WelcomeMessage] description = "A welcome message for customers" -refines = "Text" +refines = "Text" # Simple native refinement (no ordering issues) [concept.CustomerPortrait] description = "A portrait of a customer" -refines = "Image" +refines = "Image" diff --git a/tests/integration/pipelex/concepts/out_of_order_refines/out_of_order_refines.plx b/tests/integration/pipelex/concepts/out_of_order_refines/out_of_order_refines.mthds similarity index 79% rename from tests/integration/pipelex/concepts/out_of_order_refines/out_of_order_refines.plx rename to tests/integration/pipelex/concepts/out_of_order_refines/out_of_order_refines.mthds index 9ea057526..a7196736b 100644 --- a/tests/integration/pipelex/concepts/out_of_order_refines/out_of_order_refines.plx +++ b/tests/integration/pipelex/concepts/out_of_order_refines/out_of_order_refines.mthds @@ -1,4 +1,4 @@ -domain = "out_of_order_test" +domain = "out_of_order_test" description = "Test library for out-of-order concept refinement" # INTENTIONALLY DEFINED OUT OF ORDER: @@ -8,14 +8,14 @@ description = "Test library for out-of-order concept refinement" # Refined concept (defined FIRST, but references Customer which is defined AFTER) [concept.VIPCustomer] description = "A VIP customer with special privileges" -refines = "out_of_order_test.Customer" +refines = "out_of_order_test.Customer" # Base concept WITH structure (defined AFTER VIPCustomer) [concept.Customer] description = "A customer" [concept.Customer.structure] -name = { type = "text", required = true, description = "Customer name" } +name = { type = "text", required = true, description = "Customer name" } email = { type = "text", required = true, description = "Customer email" } # Concepts refining native concepts (should always work regardless of order) @@ -23,16 +23,16 @@ email = { type = "text", required = true, description = "Customer email" } [concept.Poem] description = "A poem is a piece of text with artistic expression" -refines = "native.Text" +refines = "native.Text" [concept.Summary] description = "A summary of a longer text" -refines = "Text" +refines = "Text" [concept.Screenshot] description = "A screenshot image" -refines = "native.Image" +refines = "native.Image" [concept.DetailedSummary] description = "A detailed summary that refines Summary" -refines = "out_of_order_test.Summary" +refines = "out_of_order_test.Summary" diff --git a/tests/integration/pipelex/concepts/out_of_order_refines/test_out_of_order_refines.py b/tests/integration/pipelex/concepts/out_of_order_refines/test_out_of_order_refines.py index 717428a65..fdae68714 100644 --- a/tests/integration/pipelex/concepts/out_of_order_refines/test_out_of_order_refines.py +++ b/tests/integration/pipelex/concepts/out_of_order_refines/test_out_of_order_refines.py @@ -13,7 +13,7 @@ async def test_simple_out_of_order_refines_single_file(self): """Test that concept loading fails when refining concept is defined before base (single file). This test reproduces the bug where: - 1. VIPCustomer is defined BEFORE Customer in the PLX file + 1. VIPCustomer is defined BEFORE Customer in the MTHDS file 2. VIPCustomer refines Customer 3. When loading concepts, VIPCustomer is processed first 4. 
ConceptFactory._handle_refines tries to generate a structure class @@ -21,30 +21,30 @@ async def test_simple_out_of_order_refines_single_file(self): 5. Customer's class isn't registered yet, so lookup fails 6. Error: "Base class 'Customer' not found in native classes or class registry" """ - plx_file_path = Path(__file__).parent / "out_of_order_refines.plx" - assert plx_file_path.exists(), f"PLX file not found: {plx_file_path}" + mthds_file_path = Path(__file__).parent / "out_of_order_refines.mthds" + assert mthds_file_path.exists(), f"MTHDS file not found: {mthds_file_path}" # validate_bundle internally loads libraries which triggers ConceptFactory.make_from_blueprint # This should fail because VIPCustomer is defined before Customer # with pytest.raises(ConceptFactoryError) as exc_info: - await validate_bundle(plx_file_path=plx_file_path) + await validate_bundle(mthds_file_path=mthds_file_path) async def test_multi_level_out_of_order_refines_across_files(self): """Test multi-level refinement chain fails when concepts are out of order across files. This test reproduces a more complex scenario where: - File 1 (base_domain.plx): + File 1 (base_domain.mthds): - Person (root concept with structure) - File 2 (middle_domain.plx) - concepts defined in REVERSE order: + File 2 (middle_domain.mthds) - concepts defined in REVERSE order: - PlatinumCustomer refines VIPCustomer (defined FIRST) - VIPCustomer refines Customer (defined SECOND) - Customer refines Person (defined THIRD) The inheritance chain is: PlatinumCustomer -> VIPCustomer -> Customer -> Person - When loading middle_domain.plx: + When loading middle_domain.mthds: 1. PlatinumCustomer is processed first 2. It tries to refine VIPCustomer, but VIPCustomer is not yet registered 3. Error: "Base class 'VIPCustomer' not found in native classes or class registry" @@ -56,10 +56,10 @@ async def test_multi_level_out_of_order_refines_across_files(self): """ multi_file_dir = Path(__file__).parent / "multi_file" assert multi_file_dir.exists(), f"Multi-file test directory not found: {multi_file_dir}" - assert (multi_file_dir / "base_domain.plx").exists(), "base_domain.plx not found" - assert (multi_file_dir / "middle_domain.plx").exists(), "middle_domain.plx not found" + assert (multi_file_dir / "base_domain.mthds").exists(), "base_domain.mthds not found" + assert (multi_file_dir / "middle_domain.mthds").exists(), "middle_domain.mthds not found" - # validate_bundles_from_directory loads all PLX files in the directory - # Files are loaded in order, but within middle_domain.plx concepts are out of order + # validate_bundles_from_directory loads all MTHDS files in the directory + # Files are loaded in order, but within middle_domain.mthds concepts are out of order # with pytest.raises(ConceptFactoryError) as exc_info: await validate_bundles_from_directory(directory=multi_file_dir) diff --git a/tests/integration/pipelex/concepts/refines_custom_concept/refines_custom_concept.plx b/tests/integration/pipelex/concepts/refines_custom_concept/refines_custom_concept.mthds similarity index 70% rename from tests/integration/pipelex/concepts/refines_custom_concept/refines_custom_concept.plx rename to tests/integration/pipelex/concepts/refines_custom_concept/refines_custom_concept.mthds index f4ce424d3..f18041041 100644 --- a/tests/integration/pipelex/concepts/refines_custom_concept/refines_custom_concept.plx +++ b/tests/integration/pipelex/concepts/refines_custom_concept/refines_custom_concept.mthds @@ -1,4 +1,4 @@ -domain = "refines_custom_test" +domain = 
"refines_custom_test" description = "Test library for refining custom concepts" # Base concept WITH structure @@ -6,10 +6,10 @@ description = "Test library for refining custom concepts" description = "A customer" [concept.Customer.structure] -name = { type = "text", required = true, description = "Customer name" } +name = { type = "text", required = true, description = "Customer name" } email = { type = "text", required = true, description = "Customer email" } # Refined concept (inherits structure, cannot add own) [concept.VIPCustomer] description = "A VIP customer with special privileges" -refines = "refines_custom_test.Customer" +refines = "refines_custom_test.Customer" diff --git a/tests/integration/pipelex/core/memory/test_working_memory_factory.py b/tests/integration/pipelex/core/memory/test_working_memory_factory.py index 89871567d..ccc524a9b 100644 --- a/tests/integration/pipelex/core/memory/test_working_memory_factory.py +++ b/tests/integration/pipelex/core/memory/test_working_memory_factory.py @@ -9,7 +9,7 @@ from tests.cases.images import ImageTestCases if TYPE_CHECKING: - from pipelex.client.protocol import PipelineInputs + from mthds.models.pipeline_inputs import PipelineInputs class TestWorkingMemoryFactory: diff --git a/tests/integration/pipelex/core/packages/conftest.py b/tests/integration/pipelex/core/packages/conftest.py new file mode 100644 index 000000000..0403bbd74 --- /dev/null +++ b/tests/integration/pipelex/core/packages/conftest.py @@ -0,0 +1,107 @@ +# ruff: noqa: S404, S603, S607 β€” test fixture uses subprocess to build a local bare git repo +"""Fixtures for VCS integration tests. + +Creates bare git repositories with tagged versions, accessible via file:// protocol. +""" + +import subprocess +from pathlib import Path + +import pytest + +from tests.integration.pipelex.core.packages.test_vcs_data import DependentFixtureData, VCSFixtureData + + +@pytest.fixture(scope="class") +def bare_git_repo(tmp_path_factory: pytest.TempPathFactory) -> Path: + """Create a bare git repo with two tagged versions (v1.0.0, v1.1.0). + + The repo contains METHODS.toml and a .mthds bundle file at each version. + Returns the ``file://`` URL suitable for git operations. 
+ """ + base = tmp_path_factory.mktemp("vcs_fixture") + bare_path = base / "repo.git" + work_path = base / "work" + + # Create bare repo + subprocess.run(["git", "init", "--bare", str(bare_path)], check=True, capture_output=True) + + # Create working clone + subprocess.run(["git", "clone", str(bare_path), str(work_path)], check=True, capture_output=True) + + # Configure git user for commits + subprocess.run(["git", "config", "user.email", "test@test.com"], cwd=work_path, check=True, capture_output=True) + subprocess.run(["git", "config", "user.name", "Test"], cwd=work_path, check=True, capture_output=True) + + # --- v1.0.0 --- + (work_path / "METHODS.toml").write_text(VCSFixtureData.METHODS_TOML) + mthds_dir = work_path / ".mthds" + mthds_dir.mkdir(exist_ok=True) + (mthds_dir / "main.mthds").write_text(VCSFixtureData.BUNDLE_CONTENT) + + subprocess.run(["git", "add", "-A"], cwd=work_path, check=True, capture_output=True) + subprocess.run(["git", "commit", "-m", "v1.0.0"], cwd=work_path, check=True, capture_output=True) + subprocess.run(["git", "tag", "v1.0.0"], cwd=work_path, check=True, capture_output=True) + subprocess.run(["git", "push", "origin", "HEAD", "--tags"], cwd=work_path, check=True, capture_output=True) + + # --- v1.1.0 --- + (work_path / "METHODS.toml").write_text(VCSFixtureData.METHODS_TOML_V110) + (mthds_dir / "main.mthds").write_text(VCSFixtureData.BUNDLE_CONTENT_V110) + + subprocess.run(["git", "add", "-A"], cwd=work_path, check=True, capture_output=True) + subprocess.run(["git", "commit", "-m", "v1.1.0"], cwd=work_path, check=True, capture_output=True) + subprocess.run(["git", "tag", "v1.1.0"], cwd=work_path, check=True, capture_output=True) + subprocess.run(["git", "push", "origin", "HEAD", "--tags"], cwd=work_path, check=True, capture_output=True) + + return bare_path + + +@pytest.fixture(scope="class") +def bare_git_repo_url(bare_git_repo: Path) -> str: + """Return the file:// URL for the bare git repo fixture.""" + return f"file://{bare_git_repo}" + + +@pytest.fixture(scope="class") +def bare_git_repo_dependent(tmp_path_factory: pytest.TempPathFactory) -> Path: + """Create a bare git repo for dependent-pkg with v1.0.0. + + This package declares a dependency on vcs-fixture. 
+ """ + base = tmp_path_factory.mktemp("dependent_fixture") + bare_path = base / "dependent.git" + work_path = base / "work" + + subprocess.run(["git", "init", "--bare", str(bare_path)], check=True, capture_output=True) + subprocess.run(["git", "clone", str(bare_path), str(work_path)], check=True, capture_output=True) + + subprocess.run(["git", "config", "user.email", "test@test.com"], cwd=work_path, check=True, capture_output=True) + subprocess.run(["git", "config", "user.name", "Test"], cwd=work_path, check=True, capture_output=True) + + # --- v1.0.0 --- + (work_path / "METHODS.toml").write_text(DependentFixtureData.METHODS_TOML) + mthds_dir = work_path / ".mthds" + mthds_dir.mkdir(exist_ok=True) + (mthds_dir / "main.mthds").write_text(DependentFixtureData.BUNDLE_CONTENT) + + subprocess.run(["git", "add", "-A"], cwd=work_path, check=True, capture_output=True) + subprocess.run(["git", "commit", "-m", "v1.0.0"], cwd=work_path, check=True, capture_output=True) + subprocess.run(["git", "tag", "v1.0.0"], cwd=work_path, check=True, capture_output=True) + subprocess.run(["git", "push", "origin", "HEAD", "--tags"], cwd=work_path, check=True, capture_output=True) + + return bare_path + + +@pytest.fixture(scope="class") +def bare_git_repo_dependent_url(bare_git_repo_dependent: Path) -> str: + """Return the file:// URL for the dependent bare git repo.""" + return f"file://{bare_git_repo_dependent}" + + +@pytest.fixture(scope="class") +def transitive_url_overrides(bare_git_repo_url: str, bare_git_repo_dependent_url: str) -> dict[str, str]: + """URL override map for both fixture repos.""" + return { + "github.com/mthds-test/vcs-fixture": bare_git_repo_url, + "github.com/mthds-test/dependent-pkg": bare_git_repo_dependent_url, + } diff --git a/tests/integration/pipelex/core/packages/test_cross_package_integration.py b/tests/integration/pipelex/core/packages/test_cross_package_integration.py new file mode 100644 index 000000000..40cb16296 --- /dev/null +++ b/tests/integration/pipelex/core/packages/test_cross_package_integration.py @@ -0,0 +1,88 @@ +from pathlib import Path + +from pipelex.core.interpreter.interpreter import PipelexInterpreter +from pipelex.core.packages.dependency_resolver import resolve_local_dependencies +from pipelex.core.packages.discovery import find_package_manifest +from pipelex.core.packages.manifest import MthdsPackageManifest +from pipelex.core.packages.visibility import check_visibility_for_blueprints + +# Path to the physical test data +PACKAGES_DATA_DIR = Path(__file__).resolve().parent.parent.parent.parent.parent / "data" / "packages" + + +class TestCrossPackageIntegration: + """Integration tests for cross-package dependency resolution using physical test fixtures.""" + + def test_consumer_package_visibility_passes(self): + """Consumer package with cross-package refs passes visibility checks.""" + analysis_path = PACKAGES_DATA_DIR / "consumer_package" / "analysis.mthds" + + manifest = find_package_manifest(analysis_path) + assert manifest is not None + assert len(manifest.dependencies) == 1 + assert manifest.dependencies[0].alias == "scoring_dep" + assert manifest.dependencies[0].path == "../scoring_dep" + + analysis_bp = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=analysis_path) + + # Visibility check should pass: the cross-package ref alias is known + errors = check_visibility_for_blueprints(manifest=manifest, blueprints=[analysis_bp]) + assert errors == [] + + def test_resolve_consumer_dependencies(self): + """Resolve the consumer package's dependency 
to scoring_dep.""" + analysis_path = PACKAGES_DATA_DIR / "consumer_package" / "analysis.mthds" + package_root = PACKAGES_DATA_DIR / "consumer_package" + + manifest = find_package_manifest(analysis_path) + assert manifest is not None + + resolved = resolve_local_dependencies(manifest=manifest, package_root=package_root) + assert len(resolved) == 1 + + dep = resolved[0] + assert dep.alias == "scoring_dep" + assert dep.manifest is not None + assert dep.manifest.address == "github.com/mthds/scoring-lib" + assert len(dep.mthds_files) >= 1 + assert dep.exported_pipe_codes is not None + assert "pkg_test_compute_score" in dep.exported_pipe_codes + + def test_scoring_dep_manifest_parsed_correctly(self): + """Verify the scoring_dep METHODS.toml is parsed correctly.""" + scoring_manifest_path = PACKAGES_DATA_DIR / "scoring_dep" / "scoring.mthds" + manifest = find_package_manifest(scoring_manifest_path) + assert manifest is not None + assert manifest.address == "github.com/mthds/scoring-lib" + assert manifest.version == "2.0.0" + assert len(manifest.exports) == 1 + assert manifest.exports[0].domain_path == "pkg_test_scoring_dep" + assert "pkg_test_compute_score" in manifest.exports[0].pipes + + def test_consumer_bundle_parses_with_cross_package_refs(self): + """Consumer bundle with cross-package pipe refs should parse without errors.""" + analysis_path = PACKAGES_DATA_DIR / "consumer_package" / "analysis.mthds" + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=analysis_path) + + assert blueprint.domain == "pkg_test_consumer_analysis" + assert blueprint.pipe is not None + assert "pkg_test_analyze_item" in blueprint.pipe + + def test_unknown_alias_in_consumer_produces_error(self): + """If a cross-package ref uses an unknown alias, visibility check produces an error.""" + analysis_path = PACKAGES_DATA_DIR / "consumer_package" / "analysis.mthds" + + # Create a manifest without the scoring_dep dependency + manifest_no_deps = MthdsPackageManifest( + address="github.com/mthds/consumer-app", + version="1.0.0", + description="Consumer with no deps declared", + ) + + analysis_bp = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=analysis_path) + + errors = check_visibility_for_blueprints(manifest=manifest_no_deps, blueprints=[analysis_bp]) + # Should have an error for unknown alias "scoring_dep" + cross_package_errors = [err for err in errors if "scoring_dep" in err.message] + assert len(cross_package_errors) >= 1 + assert "[dependencies]" in cross_package_errors[0].message diff --git a/tests/integration/pipelex/core/packages/test_library_isolation_integration.py b/tests/integration/pipelex/core/packages/test_library_isolation_integration.py new file mode 100644 index 000000000..4720fcfe4 --- /dev/null +++ b/tests/integration/pipelex/core/packages/test_library_isolation_integration.py @@ -0,0 +1,125 @@ +from pathlib import Path + +from pipelex.hub import get_library_manager, set_current_library +from pipelex.libraries.library_manager_abstract import LibraryManagerAbstract + +# Path to the physical test data +PACKAGES_DATA_DIR = Path(__file__).resolve().parent.parent.parent.parent.parent / "data" / "packages" + + +class TestLibraryIsolationIntegration: + """Integration tests for per-package library isolation using physical test fixtures.""" + + def _setup_library_for_path(self, mthds_paths: list[Path]) -> tuple[LibraryManagerAbstract, str]: + """Set up a library manager with the hub's current library for the given paths.""" + library_manager = get_library_manager() + 
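+ # Shared library lifecycle for these tests: open an isolated library, point the hub at it, load the given bundle files into it, and let each test tear it down via manager.teardown(library_id=...).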
library_id, _library = library_manager.open_library() + set_current_library(library_id=library_id) + library_manager.load_libraries(library_id=library_id, library_file_paths=mthds_paths) + return library_manager, library_id + + def test_consumer_loads_with_isolated_dependency(self): + """Consumer package loads with dependency in isolated child library.""" + consumer_mthds = [PACKAGES_DATA_DIR / "consumer_package" / "analysis.mthds"] + manager, library_id = self._setup_library_for_path(consumer_mthds) + library = manager.get_library(library_id) + + # scoring_dep should be registered as a child library + child = library.get_dependency_library("scoring_dep") + assert child is not None + + # Child should have the scoring concept + scoring_concept = child.concept_library.get_optional_concept("pkg_test_scoring_dep.PkgTestWeightedScore") + assert scoring_concept is not None + assert scoring_concept.code == "PkgTestWeightedScore" + + # Main library should NOT have the concept under its native key + # (native-key workaround was removed) + assert not library.concept_library.is_concept_exists("pkg_test_scoring_dep.PkgTestWeightedScore") + + # But aliased lookup should still work + aliased = library.concept_library.get_optional_concept("scoring_dep->pkg_test_scoring_dep.PkgTestWeightedScore") + assert aliased is not None + + manager.teardown(library_id=library_id) + + def test_cross_package_pipe_lookup_works(self): + """Cross-package pipe lookup via aliased key works after loading.""" + consumer_mthds = [PACKAGES_DATA_DIR / "consumer_package" / "analysis.mthds"] + manager, library_id = self._setup_library_for_path(consumer_mthds) + library = manager.get_library(library_id) + + # Cross-package pipe should be findable via aliased key + pipe = library.pipe_library.get_optional_pipe("scoring_dep->pkg_test_compute_score") + assert pipe is not None + assert pipe.code == "pkg_test_compute_score" + + # Child library should also have the pipe + child = library.get_dependency_library("scoring_dep") + assert child is not None + child_pipe = child.pipe_library.get_optional_pipe("pkg_test_compute_score") + assert child_pipe is not None + + manager.teardown(library_id=library_id) + + def test_two_deps_same_concept_code_both_load(self): + """Two dependencies with same concept code load cleanly via isolation.""" + multi_mthds = [PACKAGES_DATA_DIR / "multi_dep_consumer" / "multi.mthds"] + manager, library_id = self._setup_library_for_path(multi_mthds) + library = manager.get_library(library_id) + + # Both child libraries should exist + scoring_child = library.get_dependency_library("scoring_dep") + analytics_child = library.get_dependency_library("analytics_dep") + assert scoring_child is not None + assert analytics_child is not None + + # Both have PkgTestWeightedScore but in different domains + scoring_concept = scoring_child.concept_library.get_optional_concept("pkg_test_scoring_dep.PkgTestWeightedScore") + analytics_concept = analytics_child.concept_library.get_optional_concept("pkg_test_analytics_dep.PkgTestWeightedScore") + assert scoring_concept is not None + assert analytics_concept is not None + assert scoring_concept.domain_code == "pkg_test_scoring_dep" + assert analytics_concept.domain_code == "pkg_test_analytics_dep" + + # Both resolvable via resolve_concept + resolved_scoring = library.resolve_concept("scoring_dep->pkg_test_scoring_dep.PkgTestWeightedScore") + resolved_analytics = library.resolve_concept("analytics_dep->pkg_test_analytics_dep.PkgTestWeightedScore") + assert resolved_scoring is not None + 
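+ # Same concept code under two aliases: the domain_code comparison below shows the two lookups resolved to distinct concepts.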
assert resolved_analytics is not None + assert resolved_scoring.domain_code != resolved_analytics.domain_code + + manager.teardown(library_id=library_id) + + def test_refinement_chain_across_packages(self): + """Consumer with concept refining cross-package concept loads and validates.""" + refining_mthds = [PACKAGES_DATA_DIR / "refining_consumer" / "refining.mthds"] + manager, library_id = self._setup_library_for_path(refining_mthds) + library = manager.get_library(library_id) + + # Child library should exist + scoring_child = library.get_dependency_library("scoring_dep") + assert scoring_child is not None + + # The refining concept should exist in main library + refining_concept = library.concept_library.get_optional_concept("pkg_test_refining.PkgTestRefinedScore") + assert refining_concept is not None + assert refining_concept.refines == "scoring_dep->pkg_test_scoring_dep.PkgTestWeightedScore" + + # resolve_concept should find the target through the child library + target = library.resolve_concept("scoring_dep->pkg_test_scoring_dep.PkgTestWeightedScore") + assert target is not None + assert target.code == "PkgTestWeightedScore" + + manager.teardown(library_id=library_id) + + def test_concept_resolver_wired_after_dep_loading(self): + """The concept resolver is wired to the library after dependency loading.""" + consumer_mthds = [PACKAGES_DATA_DIR / "consumer_package" / "analysis.mthds"] + manager, library_id = self._setup_library_for_path(consumer_mthds) + library = manager.get_library(library_id) + + # The concept resolver should be set (it's a private attribute) + assert library.concept_library._concept_resolver is not None # noqa: SLF001 # pyright: ignore[reportPrivateUsage] + + manager.teardown(library_id=library_id) diff --git a/tests/integration/pipelex/core/packages/test_transitive_integration.py b/tests/integration/pipelex/core/packages/test_transitive_integration.py new file mode 100644 index 000000000..31a282b4f --- /dev/null +++ b/tests/integration/pipelex/core/packages/test_transitive_integration.py @@ -0,0 +1,69 @@ +from pathlib import Path + +from pipelex.core.packages.dependency_resolver import resolve_all_dependencies +from pipelex.core.packages.lock_file import generate_lock_file +from pipelex.core.packages.manifest import MthdsPackageManifest, PackageDependency + + +class TestTransitiveIntegration: + """Integration tests for transitive dependency resolution using local bare git repos.""" + + def test_transitive_chain_resolves( + self, + transitive_url_overrides: dict[str, str], + tmp_path: Path, + ) -> None: + """Resolve dependent-pkg and assert vcs-fixture is also transitively resolved.""" + manifest = MthdsPackageManifest( + address="github.com/mthds-test/consumer", + version="1.0.0", + description="Consumer with transitive deps", + dependencies=[ + PackageDependency( + address="github.com/mthds-test/dependent-pkg", + version="^1.0.0", + alias="dependent_pkg", + ), + ], + ) + + resolved = resolve_all_dependencies( + manifest=manifest, + package_root=tmp_path, + cache_root=tmp_path / "cache", + fetch_url_overrides=transitive_url_overrides, + ) + + addresses = {dep.address for dep in resolved} + assert "github.com/mthds-test/dependent-pkg" in addresses + assert "github.com/mthds-test/vcs-fixture" in addresses + + def test_lock_includes_transitive( + self, + transitive_url_overrides: dict[str, str], + tmp_path: Path, + ) -> None: + """Generate lock from transitive resolution; both addresses appear in lock file.""" + manifest = MthdsPackageManifest( + 
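+ # Root package for this test: it declares only dependent-pkg directly, so vcs-fixture can only show up through transitive resolution.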
address="github.com/mthds-test/consumer", + version="1.0.0", + description="Consumer with transitive deps", + dependencies=[ + PackageDependency( + address="github.com/mthds-test/dependent-pkg", + version="^1.0.0", + alias="dependent_pkg", + ), + ], + ) + + resolved = resolve_all_dependencies( + manifest=manifest, + package_root=tmp_path, + cache_root=tmp_path / "cache", + fetch_url_overrides=transitive_url_overrides, + ) + + lock = generate_lock_file(manifest, resolved) + assert "github.com/mthds-test/dependent-pkg" in lock.packages + assert "github.com/mthds-test/vcs-fixture" in lock.packages diff --git a/tests/integration/pipelex/core/packages/test_vcs_data.py b/tests/integration/pipelex/core/packages/test_vcs_data.py new file mode 100644 index 000000000..7d4e0332e --- /dev/null +++ b/tests/integration/pipelex/core/packages/test_vcs_data.py @@ -0,0 +1,67 @@ +"""Content constants for VCS integration test fixtures. + +Provides METHODS.toml and .mthds bundle content used by conftest.py +to populate bare git repo fixtures. +""" + +from typing import ClassVar + + +class VCSFixtureData: + """Constants for building test git repositories.""" + + METHODS_TOML: ClassVar[str] = """\ +[package] +address = "github.com/mthds-test/vcs-fixture" +version = "1.0.0" +description = "A test fixture package for VCS integration tests" +authors = ["TestBot"] + +[exports.vcs_fixture] +pipes = ["vcs_test_pipe"] +""" + + METHODS_TOML_V110: ClassVar[str] = """\ +[package] +address = "github.com/mthds-test/vcs-fixture" +version = "1.1.0" +description = "A test fixture package for VCS integration tests (v1.1.0)" +authors = ["TestBot"] + +[exports.vcs_fixture] +pipes = ["vcs_test_pipe", "vcs_extra_pipe"] +""" + + BUNDLE_CONTENT: ClassVar[str] = """\ +--- domain vcs_fixture +--- pipe vcs_test_pipe +""" + + BUNDLE_CONTENT_V110: ClassVar[str] = """\ +--- domain vcs_fixture +--- pipe vcs_test_pipe +--- pipe vcs_extra_pipe +""" + + +class DependentFixtureData: + """Constants for a package that depends on vcs-fixture.""" + + METHODS_TOML: ClassVar[str] = """\ +[package] +address = "github.com/mthds-test/dependent-pkg" +version = "1.0.0" +description = "A dependent test fixture package" +authors = ["TestBot"] + +[dependencies] +vcs_fixture = { address = "github.com/mthds-test/vcs-fixture", version = "^1.0.0" } + +[exports.dependent] +pipes = ["dependent_pipe"] +""" + + BUNDLE_CONTENT: ClassVar[str] = """\ +--- domain dependent +--- pipe dependent_pipe +""" diff --git a/tests/integration/pipelex/core/packages/test_vcs_resolver_integration.py b/tests/integration/pipelex/core/packages/test_vcs_resolver_integration.py new file mode 100644 index 000000000..503f1d703 --- /dev/null +++ b/tests/integration/pipelex/core/packages/test_vcs_resolver_integration.py @@ -0,0 +1,140 @@ +from pathlib import Path + +import pytest +from semantic_version import Version # type: ignore[import-untyped] + +from pipelex.core.packages.dependency_resolver import ( + resolve_all_dependencies, + resolve_remote_dependency, +) +from pipelex.core.packages.exceptions import DependencyResolveError +from pipelex.core.packages.manifest import MthdsPackageManifest, PackageDependency +from pipelex.core.packages.package_cache import is_cached +from pipelex.core.packages.vcs_resolver import clone_at_version, list_remote_version_tags + +PACKAGES_DATA_DIR = Path(__file__).resolve().parent.parent.parent.parent.parent / "data" / "packages" + + +class TestVCSResolverIntegration: + """Layer 3 integration tests for VCS resolver + cache using a local bare git repo.""" + 
+ def test_list_remote_tags(self, bare_git_repo_url: str): + """Both tagged versions are found in the bare repo.""" + version_tags = list_remote_version_tags(bare_git_repo_url) + versions = {ver for ver, _tag in version_tags} + assert Version("1.0.0") in versions + assert Version("1.1.0") in versions + + def test_clone_at_version(self, bare_git_repo_url: str, tmp_path: Path): + """Cloning at v1.0.0 produces a directory with METHODS.toml.""" + dest = tmp_path / "cloned" + clone_at_version(bare_git_repo_url, "v1.0.0", dest) + + assert (dest / "METHODS.toml").is_file() + content = (dest / "METHODS.toml").read_text() + assert 'version = "1.0.0"' in content + + def test_resolve_remote_dependency_mvs(self, bare_git_repo_url: str, tmp_path: Path): + """Constraint ^1.0.0 selects v1.0.0 via MVS.""" + dep = PackageDependency( + address="github.com/mthds-test/vcs-fixture", + version="^1.0.0", + alias="vcs_fixture", + ) + resolved = resolve_remote_dependency( + dep, + cache_root=tmp_path / "cache", + fetch_url_override=bare_git_repo_url, + ) + assert resolved.alias == "vcs_fixture" + assert resolved.manifest is not None + assert resolved.manifest.version == "1.0.0" + assert resolved.package_root.is_dir() + + def test_resolve_remote_dependency_higher_constraint(self, bare_git_repo_url: str, tmp_path: Path): + """Constraint >=1.1.0 selects v1.1.0.""" + dep = PackageDependency( + address="github.com/mthds-test/vcs-fixture", + version=">=1.1.0", + alias="vcs_fixture", + ) + resolved = resolve_remote_dependency( + dep, + cache_root=tmp_path / "cache", + fetch_url_override=bare_git_repo_url, + ) + assert resolved.manifest is not None + assert resolved.manifest.version == "1.1.0" + + def test_resolve_remote_dependency_no_match(self, bare_git_repo_url: str, tmp_path: Path): + """Constraint ^2.0.0 raises DependencyResolveError (no matching version).""" + dep = PackageDependency( + address="github.com/mthds-test/vcs-fixture", + version="^2.0.0", + alias="vcs_fixture", + ) + with pytest.raises(DependencyResolveError, match="No version satisfying"): + resolve_remote_dependency( + dep, + cache_root=tmp_path / "cache", + fetch_url_override=bare_git_repo_url, + ) + + def test_cache_hit_on_second_resolve(self, bare_git_repo_url: str, tmp_path: Path): + """Second resolve uses cache (same directory, no second clone).""" + cache_dir = tmp_path / "cache" + dep = PackageDependency( + address="github.com/mthds-test/vcs-fixture", + version="^1.0.0", + alias="vcs_fixture", + ) + + # First resolve: clones and caches + resolved_first = resolve_remote_dependency( + dep, + cache_root=cache_dir, + fetch_url_override=bare_git_repo_url, + ) + assert is_cached("github.com/mthds-test/vcs-fixture", "1.0.0", cache_root=cache_dir) + + # Second resolve: should use cache (same result) + resolved_second = resolve_remote_dependency( + dep, + cache_root=cache_dir, + fetch_url_override=bare_git_repo_url, + ) + assert resolved_first.package_root == resolved_second.package_root + + def test_resolve_all_mixed_local_and_remote(self, bare_git_repo_url: str, tmp_path: Path): + """Manifest with one local path dep + one remote dep resolves both.""" + manifest = MthdsPackageManifest( + address="github.com/mthds/consumer-app", + version="1.0.0", + description="Consumer with mixed deps", + dependencies=[ + PackageDependency( + address="github.com/mthds/scoring-lib", + version="2.0.0", + alias="scoring_dep", + path="../scoring_dep", + ), + PackageDependency( + address="github.com/mthds-test/vcs-fixture", + version="^1.0.0", + alias="vcs_fixture", + ), + 
], + ) + package_root = PACKAGES_DATA_DIR / "consumer_package" + + resolved = resolve_all_dependencies( + manifest=manifest, + package_root=package_root, + cache_root=tmp_path / "cache", + fetch_url_overrides={"github.com/mthds-test/vcs-fixture": bare_git_repo_url}, + ) + + assert len(resolved) == 2 + aliases = {dep.alias for dep in resolved} + assert "scoring_dep" in aliases + assert "vcs_fixture" in aliases diff --git a/tests/integration/pipelex/core/packages/test_visibility_integration.py b/tests/integration/pipelex/core/packages/test_visibility_integration.py new file mode 100644 index 000000000..74ccee524 --- /dev/null +++ b/tests/integration/pipelex/core/packages/test_visibility_integration.py @@ -0,0 +1,92 @@ +import shutil +from pathlib import Path + +from pipelex.core.interpreter.interpreter import PipelexInterpreter +from pipelex.core.packages.discovery import find_package_manifest +from pipelex.core.packages.visibility import check_visibility_for_blueprints + +# Path to the physical test data +PACKAGES_DATA_DIR = Path(__file__).resolve().parent.parent.parent.parent.parent / "data" / "packages" + + +class TestVisibilityIntegration: + """Integration tests using physical METHODS.toml and .mthds files on disk.""" + + def test_legal_tools_package_valid_refs(self): + """Legal tools package: all cross-domain refs are to exported pipes -> no errors.""" + contracts_path = PACKAGES_DATA_DIR / "legal_tools" / "legal" / "contracts.mthds" + scoring_path = PACKAGES_DATA_DIR / "legal_tools" / "scoring" / "scoring.mthds" + + manifest = find_package_manifest(contracts_path) + assert manifest is not None + + contracts_bp = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=contracts_path) + scoring_bp = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=scoring_path) + + errors = check_visibility_for_blueprints(manifest=manifest, blueprints=[contracts_bp, scoring_bp]) + assert errors == [] + + def test_standalone_bundle_all_public(self): + """Standalone bundle (no METHODS.toml) -> all pipes public, no errors.""" + bundle_path = PACKAGES_DATA_DIR / "standalone_bundle" / "my_pipe.mthds" + + manifest = find_package_manifest(bundle_path) + assert manifest is None + + bundle_bp = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=bundle_path) + errors = check_visibility_for_blueprints(manifest=None, blueprints=[bundle_bp]) + assert errors == [] + + def test_modified_bundle_references_private_pipe(self, tmp_path: Path): + """Modified bundle that references a private pipe -> visibility error.""" + # Copy the legal_tools package to tmp_path + src_dir = PACKAGES_DATA_DIR / "legal_tools" + dst_dir = tmp_path / "legal_tools" + shutil.copytree(src_dir, dst_dir) + + # Modify contracts.mthds to reference the private helper pipe + contracts_path = dst_dir / "legal" / "contracts.mthds" + contracts_content = contracts_path.read_text(encoding="utf-8") + contracts_content = contracts_content.replace( + "pkg_test_scoring.pkg_test_compute_weighted_score", + "pkg_test_scoring.pkg_test_private_helper", + ) + # Add the pipe reference as a sequence step + modified_content = """\ +domain = "pkg_test_legal.contracts" +main_pipe = "pkg_test_extract_clause" + +[concept.PkgTestContractClause] +description = "A clause extracted from a contract" + +[pipe.pkg_test_extract_clause] +type = "PipeLLM" +description = "Extract the main clause from a contract" +output = "PkgTestContractClause" +prompt = "Extract the main clause from the following contract text: {{ text }}" + 
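+# NOTE: pkg_test_call_private below references pkg_test_scoring.pkg_test_private_helper, which scoring.mthds does not export, so the visibility check is expected to flag it.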
+[pipe.pkg_test_extract_clause.inputs] +text = "Text" + +[pipe.pkg_test_call_private] +type = "PipeSequence" +description = "Call a private pipe from another domain" +output = "Text" + +[[pipe.pkg_test_call_private.steps]] +pipe = "pkg_test_scoring.pkg_test_private_helper" +""" + contracts_path.write_text(modified_content, encoding="utf-8") + + scoring_path = dst_dir / "scoring" / "scoring.mthds" + + manifest = find_package_manifest(contracts_path) + assert manifest is not None + + contracts_bp = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=contracts_path) + scoring_bp = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=scoring_path) + + errors = check_visibility_for_blueprints(manifest=manifest, blueprints=[contracts_bp, scoring_bp]) + assert len(errors) == 1 + assert "pkg_test_private_helper" in errors[0].pipe_ref + assert "[exports" in errors[0].message diff --git a/tests/integration/pipelex/fixtures/combo_fixtures.py b/tests/integration/pipelex/fixtures/combo_fixtures.py index 1e3dbdc22..986ff294d 100644 --- a/tests/integration/pipelex/fixtures/combo_fixtures.py +++ b/tests/integration/pipelex/fixtures/combo_fixtures.py @@ -26,7 +26,7 @@ async def test_something(self, llm_combo: ModelCombo): from pipelex.hub import get_console from pipelex.pipelex import Pipelex -from pipelex.system.configuration.configs import ConfigPaths +from pipelex.system.configuration.config_loader import config_manager from pipelex.system.runtime import IntegrationMode, runtime_manager from pipelex.tools.misc.toml_utils import load_toml_with_tomlkit, save_toml_to_path from tests.integration.pipelex.fixtures.model_selection import ( @@ -57,16 +57,16 @@ def _setup_routing_for_backend(backend_name: str) -> tuple[MonkeyPatch, Path]: # Set up routing BEFORE Pipelex.make() routing_profile_name = f"all_{backend_name}" routing_monkeypatch = MonkeyPatch() - routing_profiles_path = Path(ConfigPaths.ROUTING_PROFILES_FILE_PATH) + routing_profiles_path = Path(config_manager.routing_profiles_file_path) routing_profiles_doc = load_toml_with_tomlkit(str(routing_profiles_path)) routing_profiles_doc["active"] = routing_profile_name routing_override_dir = Path(tempfile.mkdtemp(prefix="pipelex-routing-override-")) routing_override_path = routing_override_dir / routing_profiles_path.name save_toml_to_path(routing_profiles_doc, str(routing_override_path)) routing_monkeypatch.setattr( - ConfigPaths, - "ROUTING_PROFILES_FILE_PATH", - str(routing_override_path), + type(config_manager), + "routing_profiles_file_path", + property(lambda _self: str(routing_override_path)), ) get_console().print(f"[cyan]Routing to backend:[/cyan] {backend_name}") diff --git a/tests/integration/pipelex/language/test_mthds_factory.py b/tests/integration/pipelex/language/test_mthds_factory.py new file mode 100644 index 000000000..4f072be68 --- /dev/null +++ b/tests/integration/pipelex/language/test_mthds_factory.py @@ -0,0 +1,15 @@ +import pytest + +from pipelex import pretty_print +from pipelex.core.bundles.pipelex_bundle_blueprint import PipelexBundleBlueprint +from pipelex.language.mthds_factory import MthdsFactory +from tests.unit.pipelex.core.test_data import InterpreterTestCases + + +class TestMthdsFactoryIntegration: + @pytest.mark.parametrize(("test_name", "expected_mthds_content", "blueprint"), InterpreterTestCases.VALID_TEST_CASES) + def test_make_mthds_content(self, test_name: str, expected_mthds_content: str, blueprint: PipelexBundleBlueprint): + mthds_content = MthdsFactory.make_mthds_content(blueprint=blueprint) + 
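+ # Round-trip property: serializing the blueprint must reproduce the exact MTHDS text defined in InterpreterTestCases, character for character.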
pretty_print(mthds_content, title=f"MTHDS content {test_name}") + pretty_print(expected_mthds_content, title=f"Expected MTHDS content {test_name}") + assert mthds_content == expected_mthds_content diff --git a/tests/integration/pipelex/language/test_plx_factory.py b/tests/integration/pipelex/language/test_plx_factory.py deleted file mode 100644 index 8930a5473..000000000 --- a/tests/integration/pipelex/language/test_plx_factory.py +++ /dev/null @@ -1,15 +0,0 @@ -import pytest - -from pipelex import pretty_print -from pipelex.core.bundles.pipelex_bundle_blueprint import PipelexBundleBlueprint -from pipelex.language.plx_factory import PlxFactory -from tests.unit.pipelex.core.test_data import InterpreterTestCases - - -class TestPlxFactoryIntegration: - @pytest.mark.parametrize(("test_name", "expected_plx_content", "blueprint"), InterpreterTestCases.VALID_TEST_CASES) - def test_make_plx_content(self, test_name: str, expected_plx_content: str, blueprint: PipelexBundleBlueprint): - plx_content = PlxFactory.make_plx_content(blueprint=blueprint) - pretty_print(plx_content, title=f"Plx content {test_name}") - pretty_print(expected_plx_content, title=f"Expected PLX content {test_name}") - assert plx_content == expected_plx_content diff --git a/tests/integration/pipelex/libraries/test_concept_to_concept_references.py b/tests/integration/pipelex/libraries/test_concept_to_concept_references.py index 5a4e61c4d..0069729f7 100644 --- a/tests/integration/pipelex/libraries/test_concept_to_concept_references.py +++ b/tests/integration/pipelex/libraries/test_concept_to_concept_references.py @@ -1,4 +1,4 @@ -"""Integration tests for concept-to-concept references in PLX files.""" +"""Integration tests for concept-to-concept references in MTHDS files.""" import tempfile from collections.abc import Callable @@ -14,8 +14,8 @@ class TestConceptToConceptReferences: def test_load_concepts_with_single_reference(self, load_test_library: Callable[[list[Path]], None]): """Test loading concepts where one concept references another.""" - # Create a temporary PLX file with concept references - plx_content = """ + # Create a temporary MTHDS file with concept references + mthds_content = """ domain = "testapp" description = "Test domain for concept references" @@ -35,8 +35,8 @@ def test_load_concepts_with_single_reference(self, load_test_library: Callable[[ """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test_concepts.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test_concepts.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") load_test_library([Path(tmp_dir)]) @@ -60,7 +60,7 @@ def test_load_concepts_with_single_reference(self, load_test_library: Callable[[ def test_load_concepts_with_list_of_references(self, load_test_library: Callable[[list[Path]], None]): """Test loading concepts where one concept has a list of references to another.""" - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain for list of concept references" @@ -81,8 +81,8 @@ def test_load_concepts_with_list_of_references(self, load_test_library: Callable """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test_concepts.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test_concepts.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") load_test_library([Path(tmp_dir)]) @@ -103,8 +103,8 @@ def test_load_concepts_with_list_of_references(self, load_test_library: 
Callable def test_load_concepts_dependency_order(self, load_test_library: Callable[[list[Path]], None]): """Test that concepts are loaded in dependency order (dependencies first).""" - # Define concepts in reverse dependency order in the PLX file - plx_content = """ + # Define concepts in reverse dependency order in the MTHDS file + mthds_content = """ domain = "testapp" description = "Test domain for dependency ordering" @@ -124,8 +124,8 @@ def test_load_concepts_dependency_order(self, load_test_library: Callable[[list[ """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test_concepts.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test_concepts.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") # This should not raise an error - Customer should be loaded before Invoice load_test_library([Path(tmp_dir)]) @@ -142,7 +142,7 @@ def test_load_concepts_dependency_order(self, load_test_library: Callable[[list[ def test_load_concepts_chain_dependencies(self, load_test_library: Callable[[list[Path]], None]): """Test loading concepts with chain dependencies: A -> B -> C.""" - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain for chain dependencies" @@ -168,8 +168,8 @@ def test_load_concepts_chain_dependencies(self, load_test_library: Callable[[lis """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test_concepts.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test_concepts.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") load_test_library([Path(tmp_dir)]) @@ -187,7 +187,7 @@ def test_load_concepts_chain_dependencies(self, load_test_library: Callable[[lis def test_cycle_detection_raises_error(self, load_empty_library: Callable[[], str]): """Test that cyclic dependencies are detected and raise an error.""" - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain with cyclic dependencies" @@ -205,8 +205,8 @@ def test_cycle_detection_raises_error(self, load_empty_library: Callable[[], str """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test_concepts.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test_concepts.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") library_id = load_empty_library() library_manager = get_library_manager() @@ -220,7 +220,7 @@ def test_cycle_detection_raises_error(self, load_empty_library: Callable[[], str def test_cycle_detection_self_reference(self, load_empty_library: Callable[[], str]): """Test that a concept referencing itself is detected as a cycle.""" - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain with self-referencing concept" @@ -233,8 +233,8 @@ def test_cycle_detection_self_reference(self, load_empty_library: Callable[[], s """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test_concepts.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test_concepts.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") library_id = load_empty_library() library_manager = get_library_manager() @@ -247,7 +247,7 @@ def test_cycle_detection_self_reference(self, load_empty_library: Callable[[], s def test_cycle_detection_three_concepts(self, load_empty_library: Callable[[], str]): """Test that a cycle through three concepts (A -> B -> C -> A) is 
detected.""" - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain with three-concept cycle" @@ -271,8 +271,8 @@ def test_cycle_detection_three_concepts(self, load_empty_library: Callable[[], s """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test_concepts.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test_concepts.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") library_id = load_empty_library() library_manager = get_library_manager() @@ -285,7 +285,7 @@ def test_cycle_detection_three_concepts(self, load_empty_library: Callable[[], s def test_cycle_detection_long_chain(self, load_empty_library: Callable[[], str]): """Test that a cycle through many concepts (A -> B -> C -> D -> E -> A) is detected.""" - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain with long chain cycle" @@ -316,8 +316,8 @@ def test_cycle_detection_long_chain(self, load_empty_library: Callable[[], str]) """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test_concepts.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test_concepts.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") library_id = load_empty_library() library_manager = get_library_manager() @@ -330,7 +330,7 @@ def test_cycle_detection_long_chain(self, load_empty_library: Callable[[], str]) def test_cycle_detection_through_list_field(self, load_empty_library: Callable[[], str]): """Test that cycles through list fields are detected.""" - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain with cycle through list field" @@ -350,8 +350,8 @@ def test_cycle_detection_through_list_field(self, load_empty_library: Callable[[ """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test_concepts.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test_concepts.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") library_id = load_empty_library() library_manager = get_library_manager() @@ -364,7 +364,7 @@ def test_cycle_detection_through_list_field(self, load_empty_library: Callable[[ def test_cycle_detection_partial_cycle_in_graph(self, load_empty_library: Callable[[], str]): """Test cycle detection when cycle is not at the start (D -> E -> F -> D, with A -> B -> C -> D).""" - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain with cycle deeper in the graph" @@ -400,8 +400,8 @@ def test_cycle_detection_partial_cycle_in_graph(self, load_empty_library: Callab """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test_concepts.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test_concepts.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") library_id = load_empty_library() library_manager = get_library_manager() @@ -414,7 +414,7 @@ def test_cycle_detection_partial_cycle_in_graph(self, load_empty_library: Callab def test_cross_domain_concept_reference(self, load_test_library: Callable[[list[Path]], None]): """Test loading concepts with cross-domain references.""" - crm_plx = """ + crm_mthds = """ domain = "crm" description = "CRM domain" @@ -425,7 +425,7 @@ def test_cross_domain_concept_reference(self, load_test_library: Callable[[list[ name = { type = "text", description = "Customer name" } """ - accounting_plx = 
""" + accounting_mthds = """ domain = "accounting" description = "Accounting domain" @@ -438,11 +438,11 @@ def test_cross_domain_concept_reference(self, load_test_library: Callable[[list[ """ with tempfile.TemporaryDirectory() as tmp_dir: - crm_path = Path(tmp_dir) / "crm.plx" - crm_path.write_text(crm_plx, encoding="utf-8") + crm_path = Path(tmp_dir) / "crm.mthds" + crm_path.write_text(crm_mthds, encoding="utf-8") - accounting_path = Path(tmp_dir) / "accounting.plx" - accounting_path.write_text(accounting_plx, encoding="utf-8") + accounting_path = Path(tmp_dir) / "accounting.mthds" + accounting_path.write_text(accounting_mthds, encoding="utf-8") load_test_library([Path(tmp_dir)]) diff --git a/tests/integration/pipelex/phase1_hierarchical_domains/invalid_fixtures/invalid_double_dot.mthds_invalid b/tests/integration/pipelex/phase1_hierarchical_domains/invalid_fixtures/invalid_double_dot.mthds_invalid new file mode 100644 index 000000000..5b9096ea0 --- /dev/null +++ b/tests/integration/pipelex/phase1_hierarchical_domains/invalid_fixtures/invalid_double_dot.mthds_invalid @@ -0,0 +1,5 @@ +domain = "legal..contracts" +description = "Invalid domain with double dots" + +[concept] +TestConcept = "A test concept" diff --git a/tests/integration/pipelex/phase1_hierarchical_domains/invalid_fixtures/invalid_leading_dot.mthds_invalid b/tests/integration/pipelex/phase1_hierarchical_domains/invalid_fixtures/invalid_leading_dot.mthds_invalid new file mode 100644 index 000000000..505ac0291 --- /dev/null +++ b/tests/integration/pipelex/phase1_hierarchical_domains/invalid_fixtures/invalid_leading_dot.mthds_invalid @@ -0,0 +1,5 @@ +domain = ".legal" +description = "Invalid domain with leading dot" + +[concept] +TestConcept = "A test concept" diff --git a/tests/integration/pipelex/phase1_hierarchical_domains/invalid_fixtures/invalid_same_domain_pipe_ref.mthds_invalid b/tests/integration/pipelex/phase1_hierarchical_domains/invalid_fixtures/invalid_same_domain_pipe_ref.mthds_invalid new file mode 100644 index 000000000..0e302774e --- /dev/null +++ b/tests/integration/pipelex/phase1_hierarchical_domains/invalid_fixtures/invalid_same_domain_pipe_ref.mthds_invalid @@ -0,0 +1,11 @@ +domain = "my_domain" +description = "Invalid: same-domain pipe ref to non-existent pipe" + +[pipe] +[pipe.my_sequence] +type = "PipeSequence" +description = "Sequence with invalid same-domain ref" +output = "Text" +steps = [ + { pipe = "my_domain.nonexistent_pipe", result = "something" }, +] diff --git a/tests/integration/pipelex/phase1_hierarchical_domains/test_hierarchical_domains.py b/tests/integration/pipelex/phase1_hierarchical_domains/test_hierarchical_domains.py new file mode 100644 index 000000000..3d63a3251 --- /dev/null +++ b/tests/integration/pipelex/phase1_hierarchical_domains/test_hierarchical_domains.py @@ -0,0 +1,116 @@ +"""E2E spec tests for Phase 1: Hierarchical Domains + Pipe Namespacing. + +These tests validate actual .mthds files through the full pipeline: +interpret -> blueprint -> factory -> dry run (no inference). 
+""" + +from pathlib import Path + +import pytest + +from pipelex.core.interpreter.exceptions import PipelexInterpreterError +from pipelex.pipeline.validate_bundle import ValidateBundleError, validate_bundle, validate_bundles_from_directory + +VALID_DIR = Path(__file__).parent / "valid_fixtures" +INVALID_DIR = Path(__file__).parent / "invalid_fixtures" + + +@pytest.mark.asyncio(loop_scope="class") +class TestHierarchicalDomainsAndPipeNamespacing: + """E2E spec tests for hierarchical domains and pipe namespacing.""" + + # ========== POSITIVE TESTS ========== + + async def test_single_segment_domain_baseline(self): + """Single-segment domain should work as before.""" + result = await validate_bundle( + mthds_file_path=VALID_DIR / "hierarchical_domain_single.mthds", + library_dirs=[VALID_DIR], + ) + assert result is not None + assert len(result.blueprints) == 1 + assert result.blueprints[0].domain == "legal" + assert len(result.pipes) > 0 + + async def test_nested_hierarchical_domain(self): + """Nested hierarchical domain 'legal.contracts' with concepts and pipes.""" + result = await validate_bundle( + mthds_file_path=VALID_DIR / "hierarchical_domain_nested.mthds", + library_dirs=[VALID_DIR], + ) + assert result is not None + assert len(result.blueprints) == 1 + assert result.blueprints[0].domain == "legal.contracts" + assert result.blueprints[0].concept is not None + assert "NonCompeteClause" in result.blueprints[0].concept + assert len(result.pipes) > 0 + + async def test_deep_hierarchical_domain(self): + """Deeply nested hierarchical domain 'legal.contracts.shareholder'.""" + result = await validate_bundle( + mthds_file_path=VALID_DIR / "hierarchical_domain_deep.mthds", + library_dirs=[VALID_DIR], + ) + assert result is not None + assert len(result.blueprints) == 1 + assert result.blueprints[0].domain == "legal.contracts.shareholder" + assert len(result.pipes) > 0 + + async def test_cross_domain_pipe_ref_in_sequence(self): + """Cross-domain pipe ref 'scoring.compute_score' in a PipeSequence step.""" + result = await validate_bundle( + mthds_file_path=VALID_DIR / "cross_domain_pipe_refs.mthds", + library_dirs=[VALID_DIR], + ) + assert result is not None + assert len(result.blueprints) == 1 + assert result.blueprints[0].domain == "orchestration" + assert len(result.pipes) > 0 + + async def test_cross_domain_concept_ref_with_hierarchical_domain(self): + """Cross-domain concept ref 'legal.contracts.NonCompeteClause' as input.""" + result = await validate_bundle( + mthds_file_path=VALID_DIR / "cross_domain_concept_refs.mthds", + library_dirs=[VALID_DIR], + ) + assert result is not None + assert len(result.blueprints) == 1 + assert result.blueprints[0].domain == "analysis" + assert len(result.pipes) > 0 + + async def test_multi_bundle_directory_load(self): + """All valid .mthds files from the fixtures directory loaded together.""" + result = await validate_bundles_from_directory(directory=VALID_DIR) + assert result is not None + assert len(result.blueprints) >= 6 + + domain_names = {blueprint.domain for blueprint in result.blueprints} + assert "legal" in domain_names + assert "legal.contracts" in domain_names + assert "legal.contracts.shareholder" in domain_names + assert "scoring" in domain_names + assert "orchestration" in domain_names + assert "analysis" in domain_names + + # ========== NEGATIVE TESTS ========== + + async def test_invalid_double_dot_domain(self): + """Domain 'legal..contracts' should raise a validation error.""" + with pytest.raises((ValidateBundleError, 
PipelexInterpreterError)): + await validate_bundle( + mthds_file_path=INVALID_DIR / "invalid_double_dot.mthds_invalid", + ) + + async def test_invalid_leading_dot_domain(self): + """Domain '.legal' should raise a validation error.""" + with pytest.raises((ValidateBundleError, PipelexInterpreterError)): + await validate_bundle( + mthds_file_path=INVALID_DIR / "invalid_leading_dot.mthds_invalid", + ) + + async def test_invalid_same_domain_pipe_ref_to_nonexistent(self): + """Same-domain pipe ref to non-existent pipe should raise error.""" + with pytest.raises((ValidateBundleError, PipelexInterpreterError)): + await validate_bundle( + mthds_file_path=INVALID_DIR / "invalid_same_domain_pipe_ref.mthds_invalid", + ) diff --git a/tests/integration/pipelex/phase1_hierarchical_domains/valid_fixtures/cross_domain_concept_refs.mthds b/tests/integration/pipelex/phase1_hierarchical_domains/valid_fixtures/cross_domain_concept_refs.mthds new file mode 100644 index 000000000..9bb48a50a --- /dev/null +++ b/tests/integration/pipelex/phase1_hierarchical_domains/valid_fixtures/cross_domain_concept_refs.mthds @@ -0,0 +1,11 @@ +domain = "analysis" +description = "Analysis domain using cross-domain concept references" + +[pipe] +[pipe.analyze_clause] +type = "PipeLLM" +description = "Analyze a non-compete clause from the legal.contracts domain" +inputs = { clause = "legal.contracts.NonCompeteClause" } +output = "Text" +model = "$quick-reasoning" +prompt = "Analyze @clause" diff --git a/tests/integration/pipelex/phase1_hierarchical_domains/valid_fixtures/cross_domain_pipe_refs.mthds b/tests/integration/pipelex/phase1_hierarchical_domains/valid_fixtures/cross_domain_pipe_refs.mthds new file mode 100644 index 000000000..d1e413722 --- /dev/null +++ b/tests/integration/pipelex/phase1_hierarchical_domains/valid_fixtures/cross_domain_pipe_refs.mthds @@ -0,0 +1,12 @@ +domain = "orchestration" +description = "Orchestration domain using cross-domain pipe references" + +[pipe] +[pipe.orchestrate] +type = "PipeSequence" +description = "Orchestrate scoring via cross-domain pipe ref" +inputs = { data = "Text" } +output = "scoring.WeightedScore" +steps = [ + { pipe = "scoring.compute_score", result = "score" }, +] diff --git a/tests/integration/pipelex/phase1_hierarchical_domains/valid_fixtures/hierarchical_domain_deep.mthds b/tests/integration/pipelex/phase1_hierarchical_domains/valid_fixtures/hierarchical_domain_deep.mthds new file mode 100644 index 000000000..a9ff74254 --- /dev/null +++ b/tests/integration/pipelex/phase1_hierarchical_domains/valid_fixtures/hierarchical_domain_deep.mthds @@ -0,0 +1,14 @@ +domain = "legal.contracts.shareholder" +description = "Deeply nested hierarchical domain for shareholder contracts" + +[concept] +ShareholderAgreement = "A shareholder agreement document" + +[pipe] +[pipe.analyze_agreement] +type = "PipeLLM" +description = "Analyze a shareholder agreement" +inputs = { agreement = "ShareholderAgreement" } +output = "Text" +model = "$quick-reasoning" +prompt = "Analyze @agreement" diff --git a/tests/integration/pipelex/phase1_hierarchical_domains/valid_fixtures/hierarchical_domain_nested.mthds b/tests/integration/pipelex/phase1_hierarchical_domains/valid_fixtures/hierarchical_domain_nested.mthds new file mode 100644 index 000000000..cc267f7ce --- /dev/null +++ b/tests/integration/pipelex/phase1_hierarchical_domains/valid_fixtures/hierarchical_domain_nested.mthds @@ -0,0 +1,15 @@ +domain = "legal.contracts" +description = "Nested hierarchical domain for legal contracts" + +[concept] 
+NonCompeteClause = "A non-compete clause in a contract" +ContractSummary = "A summary of a contract" + +[pipe] +[pipe.summarize_contract] +type = "PipeLLM" +description = "Summarize a contract" +inputs = { clause = "NonCompeteClause" } +output = "ContractSummary" +model = "$quick-reasoning" +prompt = "Summarize @clause" diff --git a/tests/integration/pipelex/phase1_hierarchical_domains/valid_fixtures/hierarchical_domain_single.mthds b/tests/integration/pipelex/phase1_hierarchical_domains/valid_fixtures/hierarchical_domain_single.mthds new file mode 100644 index 000000000..1c6a89e6e --- /dev/null +++ b/tests/integration/pipelex/phase1_hierarchical_domains/valid_fixtures/hierarchical_domain_single.mthds @@ -0,0 +1,14 @@ +domain = "legal" +description = "Single-segment domain baseline" + +[concept] +ContractClause = "A clause in a legal contract" + +[pipe] +[pipe.extract_clause] +type = "PipeLLM" +description = "Extract a clause from a contract" +inputs = { contract = "Text" } +output = "ContractClause" +model = "$quick-reasoning" +prompt = "Extract the clause from @contract" diff --git a/tests/integration/pipelex/phase1_hierarchical_domains/valid_fixtures/scoring.mthds b/tests/integration/pipelex/phase1_hierarchical_domains/valid_fixtures/scoring.mthds new file mode 100644 index 000000000..545b3614a --- /dev/null +++ b/tests/integration/pipelex/phase1_hierarchical_domains/valid_fixtures/scoring.mthds @@ -0,0 +1,14 @@ +domain = "scoring" +description = "Scoring domain for cross-domain dependency targets" + +[concept] +WeightedScore = "A weighted score result" + +[pipe] +[pipe.compute_score] +type = "PipeLLM" +description = "Compute a weighted score" +inputs = { data = "Text" } +output = "WeightedScore" +model = "$quick-reasoning" +prompt = "Compute score from @data" diff --git a/tests/integration/pipelex/pipeline/test_load_concepts_only.py b/tests/integration/pipelex/pipeline/test_load_concepts_only.py index e4ee9f9bf..e24712437 100644 --- a/tests/integration/pipelex/pipeline/test_load_concepts_only.py +++ b/tests/integration/pipelex/pipeline/test_load_concepts_only.py @@ -1,4 +1,4 @@ -"""Integration tests for load_concepts_only functions.""" +"""Integration tests for load_concepts_only functions from MTHDS files.""" import tempfile from collections.abc import Callable @@ -15,12 +15,12 @@ class TestLoadConceptsOnly: - """Integration tests for loading concepts only (no pipes) from PLX files.""" + """Integration tests for loading concepts only (no pipes) from MTHDS files.""" def test_load_concepts_only_single_file(self, load_empty_library: Callable[[], str]): - """Test loading concepts from a single PLX file.""" + """Test loading concepts from a single MTHDS file.""" load_empty_library() - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain" @@ -33,10 +33,10 @@ def test_load_concepts_only_single_file(self, load_empty_library: Callable[[], s """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") - result = load_concepts_only(plx_file_path=plx_path) + result = load_concepts_only(mthds_file_path=mthds_path) assert isinstance(result, LoadConceptsOnlyResult) assert len(result.blueprints) == 1 @@ -46,7 +46,7 @@ def test_load_concepts_only_single_file(self, load_empty_library: Callable[[], s def test_load_concepts_only_skips_pipes(self, load_empty_library: Callable[[], str]): 
"""Test that pipes are skipped when loading concepts only.""" load_empty_library() - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain with pipe" @@ -65,10 +65,10 @@ def test_load_concepts_only_skips_pipes(self, load_empty_library: Callable[[], s """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") - result = load_concepts_only(plx_file_path=plx_path) + result = load_concepts_only(mthds_file_path=mthds_path) # Concepts should be loaded assert len(result.concepts) == 1 @@ -82,9 +82,9 @@ def test_load_concepts_only_skips_pipes(self, load_empty_library: Callable[[], s assert len(library.pipe_library.root) == 0 def test_load_concepts_only_from_directory(self, load_empty_library: Callable[[], str]): - """Test loading concepts from a directory with multiple PLX files.""" + """Test loading concepts from a directory with multiple MTHDS files.""" load_empty_library() - plx_content_1 = """ + mthds_content_1 = """ domain = "crm" description = "CRM domain" @@ -95,7 +95,7 @@ def test_load_concepts_only_from_directory(self, load_empty_library: Callable[[] name = { type = "text", description = "Customer name" } """ - plx_content_2 = """ + mthds_content_2 = """ domain = "accounting" description = "Accounting domain" @@ -107,8 +107,8 @@ def test_load_concepts_only_from_directory(self, load_empty_library: Callable[[] """ with tempfile.TemporaryDirectory() as tmp_dir: - (Path(tmp_dir) / "crm.plx").write_text(plx_content_1, encoding="utf-8") - (Path(tmp_dir) / "accounting.plx").write_text(plx_content_2, encoding="utf-8") + (Path(tmp_dir) / "crm.mthds").write_text(mthds_content_1, encoding="utf-8") + (Path(tmp_dir) / "accounting.mthds").write_text(mthds_content_2, encoding="utf-8") result = load_concepts_only_from_directory(directory=Path(tmp_dir)) @@ -122,7 +122,7 @@ def test_load_concepts_only_from_directory(self, load_empty_library: Callable[[] def test_load_concepts_only_with_concept_references(self, load_empty_library: Callable[[], str]): """Test loading concepts that reference other concepts.""" load_empty_library() - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain with concept references" @@ -141,10 +141,10 @@ def test_load_concepts_only_with_concept_references(self, load_empty_library: Ca """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") - result = load_concepts_only(plx_file_path=plx_path) + result = load_concepts_only(mthds_file_path=mthds_path) assert len(result.concepts) == 2 @@ -161,7 +161,7 @@ def test_load_concepts_only_with_concept_references(self, load_empty_library: Ca def test_load_concepts_only_detects_cycles(self, load_empty_library: Callable[[], str]): """Test that cycle detection still works when loading concepts only.""" load_empty_library() - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain with cycles" @@ -179,17 +179,17 @@ def test_load_concepts_only_detects_cycles(self, load_empty_library: Callable[[] """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test.mthds" + 
mthds_path.write_text(mthds_content, encoding="utf-8") with pytest.raises(Exception, match=r"[Cc]ycle"): - load_concepts_only(plx_file_path=plx_path) + load_concepts_only(mthds_file_path=mthds_path) def test_load_concepts_only_with_library_dirs(self, load_empty_library: Callable[[], str]): """Test loading concepts with library dependencies.""" load_empty_library() - # Library PLX with shared concepts - library_plx = """ + # Library MTHDS with shared concepts + library_mthds = """ domain = "shared" description = "Shared library" @@ -201,8 +201,8 @@ def test_load_concepts_only_with_library_dirs(self, load_empty_library: Callable city = { type = "text", description = "City" } """ - # Main PLX that references the library concept - main_plx = """ + # Main MTHDS that references the library concept + main_mthds = """ domain = "main" description = "Main domain" @@ -215,12 +215,12 @@ def test_load_concepts_only_with_library_dirs(self, load_empty_library: Callable """ with tempfile.TemporaryDirectory() as lib_dir, tempfile.TemporaryDirectory() as main_dir: - (Path(lib_dir) / "shared.plx").write_text(library_plx, encoding="utf-8") - main_plx_path = Path(main_dir) / "main.plx" - main_plx_path.write_text(main_plx, encoding="utf-8") + (Path(lib_dir) / "shared.mthds").write_text(library_mthds, encoding="utf-8") + main_mthds_path = Path(main_dir) / "main.mthds" + main_mthds_path.write_text(main_mthds, encoding="utf-8") result = load_concepts_only( - plx_file_path=main_plx_path, + mthds_file_path=main_mthds_path, library_dirs=[Path(lib_dir)], ) @@ -238,10 +238,10 @@ def test_load_concepts_only_with_library_dirs(self, load_empty_library: Callable assert address is not None assert customer is not None - def test_load_concepts_only_with_plx_content(self, load_empty_library: Callable[[], str]): - """Test loading concepts from PLX content string.""" + def test_load_concepts_only_with_mthds_content(self, load_empty_library: Callable[[], str]): + """Test loading concepts from MTHDS content string.""" load_empty_library() - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain" @@ -252,7 +252,7 @@ def test_load_concepts_only_with_plx_content(self, load_empty_library: Callable[ name = { type = "text", description = "Item name" } """ - result = load_concepts_only(plx_content=plx_content) + result = load_concepts_only(mthds_content=mthds_content) assert len(result.blueprints) == 1 assert len(result.concepts) == 1 @@ -261,7 +261,7 @@ def test_load_concepts_only_with_plx_content(self, load_empty_library: Callable[ def test_load_concepts_only_with_refines(self, load_empty_library: Callable[[], str]): """Test loading concepts with refines relationships.""" load_empty_library() - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain with refines" @@ -277,10 +277,10 @@ def test_load_concepts_only_with_refines(self, load_empty_library: Callable[[], """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") - result = load_concepts_only(plx_file_path=plx_path) + result = load_concepts_only(mthds_file_path=mthds_path) assert len(result.concepts) == 2 @@ -291,7 +291,7 @@ def test_load_concepts_only_with_refines(self, load_empty_library: Callable[[], def test_load_concepts_only_directory_skips_pipes(self, load_empty_library: Callable[[], str]): """Test that pipes are skipped when 
loading from directory.""" load_empty_library() - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain with pipe" @@ -310,7 +310,7 @@ def test_load_concepts_only_directory_skips_pipes(self, load_empty_library: Call """ with tempfile.TemporaryDirectory() as tmp_dir: - (Path(tmp_dir) / "test.plx").write_text(plx_content, encoding="utf-8") + (Path(tmp_dir) / "test.mthds").write_text(mthds_content, encoding="utf-8") result = load_concepts_only_from_directory(directory=Path(tmp_dir)) diff --git a/tests/integration/pipelex/pipes/controller/pipe_batch/test_pipe_batch_simple.py b/tests/integration/pipelex/pipes/controller/pipe_batch/test_pipe_batch_simple.py index ae9a7ea83..ee089f9c7 100644 --- a/tests/integration/pipelex/pipes/controller/pipe_batch/test_pipe_batch_simple.py +++ b/tests/integration/pipelex/pipes/controller/pipe_batch/test_pipe_batch_simple.py @@ -50,7 +50,7 @@ async def test_simple_batch_processing( pipe_batch_blueprint = PipeBatchBlueprint( description="Simple batch processing test", - branch_pipe_code="uppercase_transformer", # This exists in the PLX file + branch_pipe_code="uppercase_transformer", # This exists in the MTHDS file inputs={ "text_list": concept_1.concept_ref, }, diff --git a/tests/integration/pipelex/pipes/controller/pipe_batch/uppercase_transformer.plx b/tests/integration/pipelex/pipes/controller/pipe_batch/uppercase_transformer.mthds similarity index 92% rename from tests/integration/pipelex/pipes/controller/pipe_batch/uppercase_transformer.plx rename to tests/integration/pipelex/pipes/controller/pipe_batch/uppercase_transformer.mthds index d34b6b104..b1ac84099 100644 --- a/tests/integration/pipelex/pipes/controller/pipe_batch/uppercase_transformer.plx +++ b/tests/integration/pipelex/pipes/controller/pipe_batch/uppercase_transformer.mthds @@ -1,4 +1,4 @@ -domain = "test_integration1" +domain = "test_integration1" description = "Simple pipes for testing PipeBatch integration" [concept] @@ -18,4 +18,3 @@ Transform the following text to uppercase and add the prefix "UPPER: ": Just return the transformed text, nothing else. 
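Aside: the cycle-detection tests above (in test_load_concepts_only.py and the library-manager tests) all assert that concept graphs like A -> B -> C -> A are rejected at load time. As a reading aid, here is a minimal sketch of the kind of three-color DFS such a check typically performs over concept field references; the graph shape, function name, and return convention are assumptions for illustration, not Pipelex's actual loader code.

```python
# Hypothetical sketch of cycle detection over concept references, matching the
# behavior the cycle tests above assert: A -> B -> C -> A rejected, cycles
# through list fields rejected, cycles deeper in the graph rejected.

def find_cycle(graph: dict[str, list[str]]) -> list[str] | None:
    """Return one cycle as a list of concept codes, or None if acyclic."""
    WHITE, GRAY, BLACK = 0, 1, 2  # unvisited / on current path / finished
    color: dict[str, int] = {node: WHITE for node in graph}
    path: list[str] = []

    def visit(node: str) -> list[str] | None:
        color[node] = GRAY
        path.append(node)
        for ref in graph.get(node, []):
            if color.get(ref, WHITE) == GRAY:  # back edge onto current path
                return path[path.index(ref):] + [ref]
            if color.get(ref, WHITE) == WHITE:
                cycle = visit(ref)
                if cycle is not None:
                    return cycle
        color[node] = BLACK
        path.pop()
        return None

    for node in graph:
        if color[node] == WHITE:
            cycle = visit(node)
            if cycle is not None:
                return cycle
    return None

# Mirrors test_cycle_detection_three_concepts: A -> B -> C -> A.
assert find_cycle({"A": ["B"], "B": ["C"], "C": ["A"]}) == ["A", "B", "C", "A"]
# A list field (items = { type = "list", item_type = "B" }) contributes the
# same A -> B edge as a scalar field, which is why the identical traversal
# also covers test_cycle_detection_through_list_field.
assert find_cycle({"A": ["B"], "B": []}) is None
```

Restarting the DFS from every unvisited node makes the result independent of where traversal begins, which is the property test_cycle_detection_partial_cycle_in_graph pins down.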
""" - diff --git a/tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_1.plx b/tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_1.mthds similarity index 71% rename from tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_1.plx rename to tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_1.mthds index b85e3d6f8..63d1441c0 100644 --- a/tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_1.plx +++ b/tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_1.mthds @@ -1,4 +1,4 @@ -domain = "test_pipe_condition" +domain = "test_pipe_condition" description = "Simple test for PipeCondition functionality" [concept] @@ -6,17 +6,17 @@ CategoryInput = "Input with a category field" [pipe] [pipe.basic_condition_by_category] -type = "PipeCondition" -description = "Route based on category field" -inputs = { input_data = "CategoryInput" } -output = "native.Text" +type = "PipeCondition" +description = "Route based on category field" +inputs = { input_data = "CategoryInput" } +output = "native.Text" expression_template = "{{ input_data.category }}" -default_outcome = "continue" +default_outcome = "continue" [pipe.basic_condition_by_category.outcomes] -small = "process_small" +small = "process_small" medium = "process_medium" -large = "process_large" +large = "process_large" [pipe.process_small] type = "PipeLLM" @@ -41,4 +41,3 @@ output = "native.Text" prompt = """ Output this only: "large" """ - diff --git a/tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_2.plx b/tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_2.mthds similarity index 70% rename from tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_2.plx rename to tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_2.mthds index c476e7874..c9bb96475 100644 --- a/tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_2.plx +++ b/tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_2.mthds @@ -1,4 +1,4 @@ -domain = "test_pipe_condition_2" +domain = "test_pipe_condition_2" description = "Simple test for PipeCondition functionality using expression" [concept] @@ -6,17 +6,17 @@ CategoryInput = "Input with a category field" [pipe] [pipe.basic_condition_by_category_2] -type = "PipeCondition" -description = "Route based on category field using expression" -inputs = { input_data = "CategoryInput" } -output = "native.Text" -expression = "input_data.category" +type = "PipeCondition" +description = "Route based on category field using expression" +inputs = { input_data = "CategoryInput" } +output = "native.Text" +expression = "input_data.category" default_outcome = "continue" [pipe.basic_condition_by_category_2.outcomes] -small = "process_small_2" +small = "process_small_2" medium = "process_medium_2" -large = "process_large_2" +large = "process_large_2" [pipe.process_small_2] type = "PipeLLM" @@ -41,4 +41,3 @@ output = "native.Text" prompt = """ Output this only: "large" """ - diff --git a/tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_complex.plx b/tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_complex.mthds similarity index 75% rename from tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_complex.plx rename to tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_complex.mthds index cd5c0df13..5a8037ec0 100644 --- 
a/tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_complex.plx +++ b/tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_complex.mthds @@ -1,68 +1,68 @@ -domain = "test_pipe_condition_complex" +domain = "test_pipe_condition_complex" description = "Complex document processing pipeline with multiple inputs and nested PipeConditions" [concept] -DocumentRequest = "Document processing request with type, priority, language, and complexity" -UserProfile = "User profile with level and department information" +DocumentRequest = "Document processing request with type, priority, language, and complexity" +UserProfile = "User profile with level and department information" ProcessingContext = "Combined processing context" [pipe] # Main entry point - routes by document type first [pipe.complex_document_processor] -type = "PipeCondition" -description = "Primary routing by document type" -inputs = { doc_request = "DocumentRequest", user_profile = "UserProfile" } -output = "native.Text" +type = "PipeCondition" +description = "Primary routing by document type" +inputs = { doc_request = "DocumentRequest", user_profile = "UserProfile" } +output = "native.Text" expression_template = "{{ doc_request.document_type }}" -default_outcome = "continue" +default_outcome = "continue" [pipe.complex_document_processor.outcomes] technical = "technical_document_router" -business = "business_document_router" -legal = "legal_document_router" +business = "business_document_router" +legal = "legal_document_router" # Technical document processing branch [pipe.technical_document_router] -type = "PipeCondition" -description = "Route technical documents by priority and user level" -inputs = { doc_request = "DocumentRequest", user_profile = "UserProfile" } -output = "native.Text" +type = "PipeCondition" +description = "Route technical documents by priority and user level" +inputs = { doc_request = "DocumentRequest", user_profile = "UserProfile" } +output = "native.Text" expression_template = "{% if doc_request.priority == 'urgent' %}urgent_tech{% elif user_profile.user_level == 'expert' and doc_request.complexity == 'high' %}expert_tech{% else %}standard_tech{% endif %}" -default_outcome = "continue" +default_outcome = "continue" [pipe.technical_document_router.outcomes] -urgent_tech = "urgent_technical_processor" -expert_tech = "expert_technical_processor" +urgent_tech = "urgent_technical_processor" +expert_tech = "expert_technical_processor" standard_tech = "standard_technical_processor" # Business document processing branch [pipe.business_document_router] -type = "PipeCondition" -description = "Route business documents by department and priority" -inputs = { doc_request = "DocumentRequest", user_profile = "UserProfile" } -output = "native.Text" +type = "PipeCondition" +description = "Route business documents by department and priority" +inputs = { doc_request = "DocumentRequest", user_profile = "UserProfile" } +output = "native.Text" expression_template = "{% if doc_request.priority == 'urgent' %}urgent_business{% elif user_profile.department == 'finance' %}finance_business{% elif user_profile.department == 'marketing' %}marketing_business{% else %}general_business{% endif %}" -default_outcome = "continue" +default_outcome = "continue" [pipe.business_document_router.outcomes] -urgent_business = "urgent_business_processor" -finance_business = "finance_business_processor" +urgent_business = "urgent_business_processor" +finance_business = "finance_business_processor" marketing_business = 
"marketing_business_processor" -general_business = "general_business_processor" +general_business = "general_business_processor" # Legal document processing branch [pipe.legal_document_router] -type = "PipeCondition" -description = "Route legal documents by complexity and user level" -inputs = { doc_request = "DocumentRequest", user_profile = "UserProfile" } -output = "native.Text" +type = "PipeCondition" +description = "Route legal documents by complexity and user level" +inputs = { doc_request = "DocumentRequest", user_profile = "UserProfile" } +output = "native.Text" expression_template = "{% if doc_request.complexity == 'high' and user_profile.user_level != 'beginner' %}complex_legal{% elif doc_request.language != 'english' %}international_legal{% else %}standard_legal{% endif %}" -default_outcome = "continue" +default_outcome = "continue" [pipe.legal_document_router.outcomes] -complex_legal = "complex_legal_processor" +complex_legal = "complex_legal_processor" international_legal = "international_legal_processor" -standard_legal = "standard_legal_processor" +standard_legal = "standard_legal_processor" # Leaf processors - Technical [pipe.urgent_technical_processor] @@ -201,4 +201,3 @@ Process this standard legal document. Output: "STANDARD_LEGAL_PROCESSED" """ - diff --git a/tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_continue_output_type.plx b/tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_continue_output_type.mthds similarity index 59% rename from tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_continue_output_type.plx rename to tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_continue_output_type.mthds index e72d95d07..0cfe6849c 100644 --- a/tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_continue_output_type.plx +++ b/tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_continue_output_type.mthds @@ -1,27 +1,27 @@ -domain = "test_pipe_condition_continue_output_type" +domain = "test_pipe_condition_continue_output_type" description = "Test PipeCondition with continue outcome and batching over verified links" [concept] VerifiedLink = "A verified link with a verdict (approved or rejected)" -Constraint = "A mathematical price constraint" +Constraint = "A mathematical price constraint" [pipe] [pipe.process_verified_links] -type = "PipeBatch" -description = "Batches over verified links and routes each based on verdict." -inputs = { verified_links = "VerifiedLink[]" } -output = "Constraint[]" -input_list_name = "verified_links" -input_item_name = "verified_link" +type = "PipeBatch" +description = "Batches over verified links and routes each based on verdict." +inputs = { verified_links = "VerifiedLink[]" } +output = "Constraint[]" +input_list_name = "verified_links" +input_item_name = "verified_link" branch_pipe_code = "build_or_skip" [pipe.build_or_skip] -type = "PipeCondition" -description = "Routes approved links to builder, rejected links to skip (continue)." -inputs = { verified_link = "VerifiedLink" } -output = "Constraint" +type = "PipeCondition" +description = "Routes approved links to builder, rejected links to skip (continue)." 
+inputs = { verified_link = "VerifiedLink" } +output = "Constraint" expression_template = "{{ verified_link.verdict }}" -default_outcome = "continue" +default_outcome = "continue" [pipe.build_or_skip.outcomes] approved = "build_single_constraint" diff --git a/tests/integration/pipelex/pipes/controller/pipe_condition/test_pipe_condition_simple.py b/tests/integration/pipelex/pipes/controller/pipe_condition/test_pipe_condition_simple.py index 601a42655..5b50cf027 100644 --- a/tests/integration/pipelex/pipes/controller/pipe_condition/test_pipe_condition_simple.py +++ b/tests/integration/pipelex/pipes/controller/pipe_condition/test_pipe_condition_simple.py @@ -108,7 +108,7 @@ async def test_condition_short_text_processing( ): """Test PipeCondition with short text that should trigger add_prefix_short_text pipe.""" load_test_library([Path("tests/integration/pipelex/pipes/controller/pipe_condition")]) - # Create PipeCondition instance - pipes are loaded from PLX files + # Create PipeCondition instance - pipes are loaded from MTHDS files pipe_condition_blueprint = PipeConditionBlueprint( description="Text length condition for short text testing", inputs={"input_text": f"{SpecialDomain.NATIVE}.{NativeConceptCode.TEXT}"}, diff --git a/tests/integration/pipelex/pipes/controller/pipe_condition/text_length_condition.plx b/tests/integration/pipelex/pipes/controller/pipe_condition/text_length_condition.mthds similarity index 95% rename from tests/integration/pipelex/pipes/controller/pipe_condition/text_length_condition.plx rename to tests/integration/pipelex/pipes/controller/pipe_condition/text_length_condition.mthds index 96cde5d9d..4376c6116 100644 --- a/tests/integration/pipelex/pipes/controller/pipe_condition/text_length_condition.plx +++ b/tests/integration/pipelex/pipes/controller/pipe_condition/text_length_condition.mthds @@ -1,4 +1,4 @@ -domain = "test_integration2" +domain = "test_integration2" description = "Test pipes for PipeCondition based on text length" [pipe] @@ -24,4 +24,3 @@ prompt = """Add the prefix "SHORT: " to the beginning of the following text: @input_text.text Return only the prefixed text, nothing else.""" - diff --git a/tests/integration/pipelex/pipes/controller/pipe_parallel/parallel_text_analysis.plx b/tests/integration/pipelex/pipes/controller/pipe_parallel/parallel_text_analysis.mthds similarity index 96% rename from tests/integration/pipelex/pipes/controller/pipe_parallel/parallel_text_analysis.plx rename to tests/integration/pipelex/pipes/controller/pipe_parallel/parallel_text_analysis.mthds index 550ac8d12..62615693d 100644 --- a/tests/integration/pipelex/pipes/controller/pipe_parallel/parallel_text_analysis.plx +++ b/tests/integration/pipelex/pipes/controller/pipe_parallel/parallel_text_analysis.mthds @@ -1,4 +1,4 @@ -domain = "test_integration3" +domain = "test_integration3" description = "Test pipes for PipeParallel text analysis" [pipe] @@ -36,4 +36,3 @@ structuring_method = "preliminary_text" Extract the top 3 keywords from the following text. 
Return them as a comma-separated list: @input_text.text """ - diff --git a/tests/integration/pipelex/pipes/controller/pipe_parallel/pipe_parallel_1.plx b/tests/integration/pipelex/pipes/controller/pipe_parallel/pipe_parallel_1.mthds similarity index 77% rename from tests/integration/pipelex/pipes/controller/pipe_parallel/pipe_parallel_1.plx rename to tests/integration/pipelex/pipes/controller/pipe_parallel/pipe_parallel_1.mthds index 3c4cf42dd..8ae71ad97 100644 --- a/tests/integration/pipelex/pipes/controller/pipe_parallel/pipe_parallel_1.plx +++ b/tests/integration/pipelex/pipes/controller/pipe_parallel/pipe_parallel_1.mthds @@ -1,10 +1,10 @@ -domain = "test_pipe_parallel" +domain = "test_pipe_parallel" description = "Simple test for PipeParallel functionality" [concept] -DocumentInput = "Input document with text content" -LengthAnalysis = "Analysis of document length and structure" -ContentAnalysis = "Analysis of document content and themes" +DocumentInput = "Input document with text content" +LengthAnalysis = "Analysis of document length and structure" +ContentAnalysis = "Analysis of document content and themes" CombinedAnalysis = "Combined analysis results from parallel processing" [pipe] @@ -15,9 +15,9 @@ inputs = { document = "DocumentInput" } output = "CombinedAnalysis" add_each_output = true combined_output = "CombinedAnalysis" -parallels = [ - { pipe = "analyze_length", result = "length_result" }, - { pipe = "analyze_content", result = "content_result" }, +branches = [ + { pipe = "analyze_length", result = "length_result" }, + { pipe = "analyze_content", result = "content_result" }, ] [pipe.analyze_length] @@ -53,4 +53,3 @@ Provide a brief analysis focusing on: - Key concepts discussed - Overall content summary """ - diff --git a/tests/integration/pipelex/pipes/controller/pipe_parallel/test_pipe_parallel_simple.py b/tests/integration/pipelex/pipes/controller/pipe_parallel/test_pipe_parallel_simple.py index 2ec240177..2b9a9bbdf 100644 --- a/tests/integration/pipelex/pipes/controller/pipe_parallel/test_pipe_parallel_simple.py +++ b/tests/integration/pipelex/pipes/controller/pipe_parallel/test_pipe_parallel_simple.py @@ -27,12 +27,12 @@ async def test_parallel_text_analysis( ): """Test PipeParallel running three text analysis pipes in parallel.""" load_test_library([Path("tests/integration/pipelex/pipes/controller/pipe_parallel")]) - # Create PipeParallel instance - pipes are loaded from PLX files + # Create PipeParallel instance - pipes are loaded from MTHDS files pipe_parallel_blueprint = PipeParallelBlueprint( description="Parallel text analysis pipeline", inputs={"input_text": f"{SpecialDomain.NATIVE}.{NativeConceptCode.TEXT}"}, output=f"{SpecialDomain.NATIVE}.{NativeConceptCode.TEXT}", - parallels=[ + branches=[ SubPipeBlueprint(pipe="analyze_sentiment", result="sentiment_result"), SubPipeBlueprint(pipe="count_words", result="word_count_result"), SubPipeBlueprint(pipe="extract_keywords", result="keywords_result"), @@ -151,7 +151,7 @@ async def test_parallel_short_text_analysis( description="Parallel text analysis pipeline for short text", inputs={"input_text": f"{SpecialDomain.NATIVE}.{NativeConceptCode.TEXT}"}, output=f"{SpecialDomain.NATIVE}.{NativeConceptCode.TEXT}", - parallels=[ + branches=[ SubPipeBlueprint(pipe="analyze_sentiment", result="sentiment_result"), SubPipeBlueprint(pipe="count_words", result="word_count_result"), SubPipeBlueprint(pipe="extract_keywords", result="keywords_result"), diff --git 
a/tests/integration/pipelex/pipes/controller/pipe_parallel/test_pipe_parallel_validation.py b/tests/integration/pipelex/pipes/controller/pipe_parallel/test_pipe_parallel_validation.py index 1db6fa6d5..1ddd7e105 100644 --- a/tests/integration/pipelex/pipes/controller/pipe_parallel/test_pipe_parallel_validation.py +++ b/tests/integration/pipelex/pipes/controller/pipe_parallel/test_pipe_parallel_validation.py @@ -71,7 +71,7 @@ def test_pipe_parallel_with_real_pipe_structure(self, load_empty_library: Callab "context": concept_2.code, }, output=ConceptFactory.make_concept_ref_with_domain(domain_code=domain_code, concept_code=concept_3.code), - parallels=[SubPipeBlueprint(pipe=real_pipe.code, result="analysis_result")], + branches=[SubPipeBlueprint(pipe=real_pipe.code, result="analysis_result")], add_each_output=True, combined_output=None, ) @@ -123,7 +123,7 @@ def test_pipe_parallel_creation(self, load_empty_library: Callable[[], None]): description="Basic parallel pipe for testing", inputs={"input_var": concept_1.concept_ref}, output=ConceptFactory.make_concept_ref_with_domain(domain_code=domain_code, concept_code=concept_3.code), - parallels=[SubPipeBlueprint(pipe="test_pipe_1", result="result_1")], + branches=[SubPipeBlueprint(pipe="test_pipe_1", result="result_1")], add_each_output=True, combined_output=None, ) @@ -178,7 +178,7 @@ def test_pipe_parallel_needed_inputs_structure(self, load_empty_library: Callabl "context": concept_2.concept_ref, }, output=ConceptFactory.make_concept_ref_with_domain(domain_code=domain_code, concept_code=concept_3.code), - parallels=[], # No sub-pipes to avoid dependency issues + branches=[], # No sub-pipes to avoid dependency issues add_each_output=True, combined_output=None, ) diff --git a/tests/integration/pipelex/pipes/controller/pipe_sequence/capitalize_text.plx b/tests/integration/pipelex/pipes/controller/pipe_sequence/capitalize_text.mthds similarity index 94% rename from tests/integration/pipelex/pipes/controller/pipe_sequence/capitalize_text.plx rename to tests/integration/pipelex/pipes/controller/pipe_sequence/capitalize_text.mthds index af2b8845b..a7c32a669 100644 --- a/tests/integration/pipelex/pipes/controller/pipe_sequence/capitalize_text.plx +++ b/tests/integration/pipelex/pipes/controller/pipe_sequence/capitalize_text.mthds @@ -1,4 +1,4 @@ -domain = "test_integration4" +domain = "test_integration4" description = "Test pipe for capitalizing text to uppercase" [pipe] @@ -23,4 +23,3 @@ prompt = """Add the prefix "PROCESSED: " to the beginning of the following text: @capitalized_text.text Return only the prefixed text, nothing else.""" - diff --git a/tests/integration/pipelex/pipes/controller/pipe_sequence/discord_newsletter.plx b/tests/integration/pipelex/pipes/controller/pipe_sequence/discord_newsletter.mthds similarity index 80% rename from tests/integration/pipelex/pipes/controller/pipe_sequence/discord_newsletter.plx rename to tests/integration/pipelex/pipes/controller/pipe_sequence/discord_newsletter.mthds index 041302661..297ac2a69 100644 --- a/tests/integration/pipelex/pipes/controller/pipe_sequence/discord_newsletter.plx +++ b/tests/integration/pipelex/pipes/controller/pipe_sequence/discord_newsletter.mthds @@ -1,10 +1,10 @@ -domain = "discord_newsletter" +domain = "discord_newsletter" description = "Create newsletters from Discord channel content by summarizing messages and organizing them according to newsletter format" [concept] DiscordChannelUpdate = "A Discord channel with its messages for newsletter generation" -ChannelSummary = "A 
summarized Discord channel for newsletter inclusion" -HtmlNewsletter = "The final newsletter content in html format with organized channel summaries" +ChannelSummary = "A summarized Discord channel for newsletter inclusion" +HtmlNewsletter = "The final newsletter content in html format with organized channel summaries" [pipe.write_discord_newsletter] type = "PipeSequence" @@ -12,19 +12,19 @@ description = "Create a newsletter from Discord articles by summarizing channels inputs = { discord_channel_updates = "DiscordChannelUpdate[]" } output = "HtmlNewsletter" steps = [ - { pipe = "summarize_discord_channel_update", batch_over = "discord_channel_updates", batch_as = "discord_channel_update", result = "channel_summaries" }, - { pipe = "write_weekly_summary", result = "weekly_summary" }, - { pipe = "format_html_newsletter", result = "html_newsletter" }, + { pipe = "summarize_discord_channel_update", batch_over = "discord_channel_updates", batch_as = "discord_channel_update", result = "channel_summaries" }, + { pipe = "write_weekly_summary", result = "weekly_summary" }, + { pipe = "format_html_newsletter", result = "html_newsletter" }, ] [pipe.summarize_discord_channel_update] -type = "PipeCondition" -description = "Select the appropriate summary pipe based on the channel name" -inputs = { discord_channel_update = "DiscordChannelUpdate" } -output = "ChannelSummary" -expression = "discord_channel_update.name" -outcomes = { "Introduce-Yourself" = "summarize_discord_channel_update_for_new_members" } +type = "PipeCondition" +description = "Select the appropriate summary pipe based on the channel name" +inputs = { discord_channel_update = "DiscordChannelUpdate" } +output = "ChannelSummary" +expression = "discord_channel_update.name" +outcomes = { "Introduce-Yourself" = "summarize_discord_channel_update_for_new_members" } default_outcome = "summarize_discord_channel_update_general" [pipe.summarize_discord_channel_update_for_new_members] @@ -74,10 +74,10 @@ Keep it short: 200 characters. 
""" [pipe.format_html_newsletter] -type = "PipeCompose" +type = "PipeCompose" description = "Combine weekly and channel summaries into a complete newsletter following specific formatting requirements" -inputs = { weekly_summary = "Text", channel_summaries = "ChannelSummary[]" } -output = "HtmlNewsletter" +inputs = { weekly_summary = "Text", channel_summaries = "ChannelSummary[]" } +output = "HtmlNewsletter" [pipe.format_html_newsletter.template] category = "html" @@ -124,4 +124,3 @@ $weekly_summary {% endfor %} {% endif %} """ - diff --git a/tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_1.plx b/tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_1.mthds similarity index 78% rename from tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_1.plx rename to tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_1.mthds index 7e87d360e..bbc148160 100644 --- a/tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_1.plx +++ b/tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_1.mthds @@ -1,9 +1,9 @@ -domain = "simple_text_processing" -description = "Simple text processing pipeline without batching" +domain = "simple_text_processing" +description = "Simple text processing pipeline without batching" system_prompt = "You are an expert at text analysis and processing" [concept] -RawText = "Raw input text to be processed" +RawText = "Raw input text to be processed" CleanedText = "Text that has been cleaned and normalized" SummaryText = "A summary of the processed text" @@ -14,8 +14,8 @@ description = "Process text through cleaning and summarization" inputs = { raw_text = "RawText" } output = "SummaryText" steps = [ - { pipe = "clean_text", result = "cleaned_text" }, - { pipe = "generate_summary", result = "final_summary" }, + { pipe = "clean_text", result = "cleaned_text" }, + { pipe = "generate_summary", result = "final_summary" }, ] [pipe.clean_text] @@ -44,4 +44,3 @@ Generate a concise summary of the following text in 2-3 sentences: @cleaned_text """ - diff --git a/tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_2.plx b/tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_2.mthds similarity index 71% rename from tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_2.plx rename to tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_2.mthds index 03b346751..686ab86ec 100644 --- a/tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_2.plx +++ b/tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_2.mthds @@ -1,12 +1,12 @@ -domain = "customer_feedback" -description = "Processing customer reviews and feedback" +domain = "customer_feedback" +description = "Processing customer reviews and feedback" system_prompt = "You are an expert at analyzing customer feedback and sentiment" [concept] -CustomerReview = "A single customer review text" +CustomerReview = "A single customer review text" SentimentAnalysis = "Sentiment analysis result for a review" -ProductRating = "Overall product rating based on reviews" -ReviewDocument = "A document containing multiple customer reviews" +ProductRating = "Overall product rating based on reviews" +ReviewDocument = "A document containing multiple customer reviews" [pipe] [pipe.analyze_reviews_sequence] @@ -15,9 +15,9 @@ description = "Process customer reviews with sentiment analysis" inputs = { document = "ReviewDocument" } output = "ProductRating" 
steps = [ - { pipe = "extract_individual_reviews", result = "review_list" }, - { pipe = "analyze_review_sentiment", batch_over = "review_list", batch_as = "single_review", result = "sentiment_analyses" }, - { pipe = "aggregate_review_results", result = "product_rating" }, + { pipe = "extract_individual_reviews", result = "review_list" }, + { pipe = "analyze_review_sentiment", batch_over = "review_list", batch_as = "single_review", result = "sentiment_analyses" }, + { pipe = "aggregate_review_results", result = "product_rating" }, ] [pipe.extract_individual_reviews] @@ -59,4 +59,3 @@ Based on these sentiment analyses, provide an overall product rating: Give a rating from 1-5 stars with explanation. """ - diff --git a/tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_3.plx b/tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_3.mthds similarity index 78% rename from tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_3.plx rename to tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_3.mthds index be3cf0499..d0a460aa0 100644 --- a/tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_3.plx +++ b/tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_3.mthds @@ -1,13 +1,13 @@ -domain = "creative_ideation" -description = "Creative ideation pipeline with multiple outputs, batching, and evaluation" +domain = "creative_ideation" +description = "Creative ideation pipeline with multiple outputs, batching, and evaluation" system_prompt = "You are a creative brainstorming expert who generates and evaluates ideas" [concept] -CreativeTopic = "A topic or theme for creative ideation" -CreativeIdea = "A single creative idea or concept" -IdeaAnalysis = "Detailed analysis of a single creative idea" +CreativeTopic = "A topic or theme for creative ideation" +CreativeIdea = "A single creative idea or concept" +IdeaAnalysis = "Detailed analysis of a single creative idea" IdeaEvaluation = "An evaluation and ranking of creative ideas" -BestIdea = "The top-ranked creative idea with justification" +BestIdea = "The top-ranked creative idea with justification" [pipe] [pipe.creative_ideation_sequence] @@ -16,10 +16,10 @@ description = "Generate multiple ideas, analyze each individually, then select t inputs = { topic = "CreativeTopic" } output = "BestIdea" steps = [ - { pipe = "generate_multiple_ideas", result = "idea_list" }, - { pipe = "analyze_single_idea", batch_over = "idea_list", batch_as = "single_idea", result = "detailed_analyses" }, - { pipe = "evaluate_all_ideas", result = "evaluation" }, - { pipe = "select_best_idea", result = "final_best_idea" }, + { pipe = "generate_multiple_ideas", result = "idea_list" }, + { pipe = "analyze_single_idea", batch_over = "idea_list", batch_as = "single_idea", result = "detailed_analyses" }, + { pipe = "evaluate_all_ideas", result = "evaluation" }, + { pipe = "select_best_idea", result = "final_best_idea" }, ] [pipe.generate_multiple_ideas] @@ -109,4 +109,3 @@ Provide: 2. Key strengths that make it the best choice 3. 
Potential next steps for implementation """ - diff --git a/tests/integration/pipelex/pipes/controller/pipe_sequence/test_pipe_sequence_list_output_bug.py b/tests/integration/pipelex/pipes/controller/pipe_sequence/test_pipe_sequence_list_output_bug.py index 6010579e2..ffb44e92f 100644 --- a/tests/integration/pipelex/pipes/controller/pipe_sequence/test_pipe_sequence_list_output_bug.py +++ b/tests/integration/pipelex/pipes/controller/pipe_sequence/test_pipe_sequence_list_output_bug.py @@ -21,7 +21,7 @@ class TestData: """Test data for pipe_sequence list output bug.""" - PLX_BUNDLE: ClassVar[str] = """ + MTHDS_BUNDLE: ClassVar[str] = """ domain = "test_list_output" description = "Test bundle for list output bug" @@ -85,13 +85,13 @@ async def test_pipe_llm_list_output_produces_list_content_in_sequence(self): with tempfile.TemporaryDirectory() as temp_dir: temp_path = Path(temp_dir) - # Create the .plx file - plx_file = temp_path / "test_bundle.plx" - plx_file.write_text(TestData.PLX_BUNDLE) + # Create the .mthds file + mthds_file = temp_path / "test_bundle.mthds" + mthds_file.write_text(TestData.MTHDS_BUNDLE) # Load the bundle result = await validate_bundle( - plx_file_path=plx_file, + mthds_file_path=mthds_file, library_dirs=[temp_path], ) @@ -116,13 +116,13 @@ async def test_standalone_pipe_llm_with_list_output(self): with tempfile.TemporaryDirectory() as temp_dir: temp_path = Path(temp_dir) - # Create the .plx file - plx_file = temp_path / "test_bundle.plx" - plx_file.write_text(TestData.PLX_BUNDLE) + # Create the .mthds file + mthds_file = temp_path / "test_bundle.mthds" + mthds_file.write_text(TestData.MTHDS_BUNDLE) # Load the bundle await validate_bundle( - plx_file_path=plx_file, + mthds_file_path=mthds_file, library_dirs=[temp_path], ) @@ -143,7 +143,7 @@ async def test_standalone_pipe_llm_with_list_output(self): class TestDataNested: """Test data for nested pipe_sequence list output bug.""" - PLX_BUNDLE: ClassVar[str] = """ + MTHDS_BUNDLE: ClassVar[str] = """ domain = "test_nested_list_output" description = "Test bundle for nested list output bug" @@ -251,13 +251,13 @@ async def test_nested_sequence_with_list_output_and_batch_over(self): with tempfile.TemporaryDirectory() as temp_dir: temp_path = Path(temp_dir) - # Create the .plx file - plx_file = temp_path / "test_bundle.plx" - plx_file.write_text(TestDataNested.PLX_BUNDLE) + # Create the .mthds file + mthds_file = temp_path / "test_bundle.mthds" + mthds_file.write_text(TestDataNested.MTHDS_BUNDLE) # Load the bundle result = await validate_bundle( - plx_file_path=plx_file, + mthds_file_path=mthds_file, library_dirs=[temp_path], ) @@ -278,13 +278,13 @@ async def test_inner_sequence_directly(self): with tempfile.TemporaryDirectory() as temp_dir: temp_path = Path(temp_dir) - # Create the .plx file - plx_file = temp_path / "test_bundle.plx" - plx_file.write_text(TestDataNested.PLX_BUNDLE) + # Create the .mthds file + mthds_file = temp_path / "test_bundle.mthds" + mthds_file.write_text(TestDataNested.MTHDS_BUNDLE) # Load the bundle await validate_bundle( - plx_file_path=plx_file, + mthds_file_path=mthds_file, library_dirs=[temp_path], ) diff --git a/tests/integration/pipelex/pipes/controller/pipe_sequence/test_pipe_sequence_simple.py b/tests/integration/pipelex/pipes/controller/pipe_sequence/test_pipe_sequence_simple.py index fce24895b..0f59065bb 100644 --- a/tests/integration/pipelex/pipes/controller/pipe_sequence/test_pipe_sequence_simple.py +++ b/tests/integration/pipelex/pipes/controller/pipe_sequence/test_pipe_sequence_simple.py @@ 
-41,7 +41,7 @@ async def test_simple_sequence_processing( concept_library.add_concepts([concept_1]) concept_2 = get_native_concept(native_concept=NativeConceptCode.TEXT) - # Create PipeSequence instance - pipes are loaded from PLX files + # Create PipeSequence instance - pipes are loaded from MTHDS files pipe_sequence_blueprint = PipeSequenceBlueprint( description="Simple sequence for text processing", inputs={"input_text": concept_1.concept_ref}, diff --git a/tests/integration/pipelex/pipes/llm_prompt_inputs/test_image_inputs_inference.py b/tests/integration/pipelex/pipes/llm_prompt_inputs/test_image_inputs_inference.py index 7954d17d1..b633b1b48 100644 --- a/tests/integration/pipelex/pipes/llm_prompt_inputs/test_image_inputs_inference.py +++ b/tests/integration/pipelex/pipes/llm_prompt_inputs/test_image_inputs_inference.py @@ -147,7 +147,7 @@ async def test_analyze_image_collection( assert pipe_output.main_stuff is not None if pipe_run_mode.is_live: - # Verify that the output is the Analysis concept from the PLX file + # Verify that the output is the Analysis concept from the MTHDS file assert pipe_output.main_stuff.concept.code == "Analysis" async def test_compare_two_image_collections( @@ -198,7 +198,7 @@ async def test_compare_two_image_collections( assert pipe_output.main_stuff is not None if pipe_run_mode.is_live: - # Verify that the output is the Analysis concept from the PLX file + # Verify that the output is the Analysis concept from the MTHDS file assert pipe_output.main_stuff.concept.code == "Analysis" @pytest.mark.parametrize(("_topic", "data_url"), ImageTestCases.DATA_URL_VISION_TEST_CASES) diff --git a/tests/integration/pipelex/pipes/operator/pipe_compose_structured/compose_structured_models.mthds b/tests/integration/pipelex/pipes/operator/pipe_compose_structured/compose_structured_models.mthds new file mode 100644 index 000000000..fc219bc0a --- /dev/null +++ b/tests/integration/pipelex/pipes/operator/pipe_compose_structured/compose_structured_models.mthds @@ -0,0 +1,53 @@ +domain = "compose_structured_test" +description = "Concepts for testing PipeCompose with construct (StructuredContent output)" + +[concept] +Address = "Address for nested structure testing" +Deal = "Deal for working memory input testing" +SalesSummary = "Sales summary for construct composition testing" +SimpleReport = "Simple report for fixed value testing" +Company = "Company with nested address for testing nested composition" +Order = "Order for invoice testing" +Customer = "Customer for invoice testing" +InvoiceDocument = "Invoice document for nested construct testing" + +# Content type conversion testing concepts +MarkdownText = "TextContent subclass with format metadata" +ReportWithStrField = "Report with str field for TextContent to str conversion" +ReportWithTextContent = "Report with TextContent field to keep TextContent object" +ReportWithMarkdown = "Report with MarkdownText field to keep subclass object" +TeamMember = "Team member for list testing" +TeamReport = "Team report with list[TeamMember] field for list extraction" +TeamReportWithListContent = "Team report with ListContent field to keep ListContent object" + +# Subclassing and class equivalence testing concepts +RichTextContent = "TextContent subclass with formatting metadata" +ReportWithBaseTextContent = "Report expecting base TextContent accepts subclasses" +Person = "Person model for class equivalence testing" +Employee = "Employee model structurally equivalent to Person" +Manager = "Manager subclass of Person with extra field" 
+TeamWithPersons = "Team expecting list[Person] tests item subclassing" +TeamWithEmployees = "Team expecting list[Employee] tests item class equivalence" +TeamWithListContentPersons = "Team expecting ListContent[Person] tests item subclassing" +Product = "Product model for mixed list testing" +DiscountedProduct = "Product subclass with discount field" +Catalog = "Catalog expecting list[Product] tests subclass items" + +# Direct StructuredContent object composition testing concepts +PersonHolder = "Container with Person field for direct object composition" +EmployeeHolder = "Container with Employee field for class equivalence" +ManagerHolder = "Container with Manager field for subclass testing" +Location = "Location model with different fields than Person/Employee" +LocationHolder = "Container with Location field for incompatible class testing" + +# StuffContent subclass testing concepts (ImageContent, DocumentContent, etc.) +ImageGallery = "Gallery with ImageContent fields" +DocumentArchive = "Archive with DocumentContent fields" +Metrics = "Metrics container with NumberContent fields" +PageReport = "Report containing PageContent" +CodeSnippet = "Container for MermaidContent" +WebContent = "Container for HtmlContent" +DataPayload = "Container for JSONContent" +MixedMediaReport = "Report with multiple StuffContent types" +ImageListGallery = "Gallery with a list of ImageContent" +DocumentBundle = "Bundle with a list of DocumentContent" diff --git a/tests/integration/pipelex/pipes/operator/pipe_compose_structured/compose_structured_models.plx b/tests/integration/pipelex/pipes/operator/pipe_compose_structured/compose_structured_models.plx deleted file mode 100644 index bf9cbc778..000000000 --- a/tests/integration/pipelex/pipes/operator/pipe_compose_structured/compose_structured_models.plx +++ /dev/null @@ -1,54 +0,0 @@ -domain = "compose_structured_test" -description = "Concepts for testing PipeCompose with construct (StructuredContent output)" - -[concept] -Address = "Address for nested structure testing" -Deal = "Deal for working memory input testing" -SalesSummary = "Sales summary for construct composition testing" -SimpleReport = "Simple report for fixed value testing" -Company = "Company with nested address for testing nested composition" -Order = "Order for invoice testing" -Customer = "Customer for invoice testing" -InvoiceDocument = "Invoice document for nested construct testing" - -# Content type conversion testing concepts -MarkdownText = "TextContent subclass with format metadata" -ReportWithStrField = "Report with str field for TextContent to str conversion" -ReportWithTextContent = "Report with TextContent field to keep TextContent object" -ReportWithMarkdown = "Report with MarkdownText field to keep subclass object" -TeamMember = "Team member for list testing" -TeamReport = "Team report with list[TeamMember] field for list extraction" -TeamReportWithListContent = "Team report with ListContent field to keep ListContent object" - -# Subclassing and class equivalence testing concepts -RichTextContent = "TextContent subclass with formatting metadata" -ReportWithBaseTextContent = "Report expecting base TextContent accepts subclasses" -Person = "Person model for class equivalence testing" -Employee = "Employee model structurally equivalent to Person" -Manager = "Manager subclass of Person with extra field" -TeamWithPersons = "Team expecting list[Person] tests item subclassing" -TeamWithEmployees = "Team expecting list[Employee] tests item class equivalence" -TeamWithListContentPersons 
= "Team expecting ListContent[Person] tests item subclassing" -Product = "Product model for mixed list testing" -DiscountedProduct = "Product subclass with discount field" -Catalog = "Catalog expecting list[Product] tests subclass items" - -# Direct StructuredContent object composition testing concepts -PersonHolder = "Container with Person field for direct object composition" -EmployeeHolder = "Container with Employee field for class equivalence" -ManagerHolder = "Container with Manager field for subclass testing" -Location = "Location model with different fields than Person/Employee" -LocationHolder = "Container with Location field for incompatible class testing" - -# StuffContent subclass testing concepts (ImageContent, DocumentContent, etc.) -ImageGallery = "Gallery with ImageContent fields" -DocumentArchive = "Archive with DocumentContent fields" -Metrics = "Metrics container with NumberContent fields" -PageReport = "Report containing PageContent" -CodeSnippet = "Container for MermaidContent" -WebContent = "Container for HtmlContent" -DataPayload = "Container for JSONContent" -MixedMediaReport = "Report with multiple StuffContent types" -ImageListGallery = "Gallery with a list of ImageContent" -DocumentBundle = "Bundle with a list of DocumentContent" - diff --git a/tests/integration/pipelex/pipes/operator/pipe_compose_structured/test_pipe_compose_structured.py b/tests/integration/pipelex/pipes/operator/pipe_compose_structured/test_pipe_compose_structured.py index 43ee0c3cf..2e84c0bce 100644 --- a/tests/integration/pipelex/pipes/operator/pipe_compose_structured/test_pipe_compose_structured.py +++ b/tests/integration/pipelex/pipes/operator/pipe_compose_structured/test_pipe_compose_structured.py @@ -1,7 +1,7 @@ """Integration tests for PipeCompose with construct (StructuredContent output). These tests verify that PipeCompose can produce StructuredContent objects -using the construct blueprint syntax in PLX files. +using the construct blueprint syntax in MTHDS files. 
""" from pathlib import Path diff --git a/tests/integration/pipelex/pipes/operator/pipe_func/test_pipe_func_validation_errors.py b/tests/integration/pipelex/pipes/operator/pipe_func/test_pipe_func_validation_errors.py index 4c9f86b02..5c73034f7 100644 --- a/tests/integration/pipelex/pipes/operator/pipe_func/test_pipe_func_validation_errors.py +++ b/tests/integration/pipelex/pipes/operator/pipe_func/test_pipe_func_validation_errors.py @@ -20,8 +20,8 @@ class TestData: """Test data for pipe_func validation error tests.""" @staticmethod - def make_plx_content(function_name: str) -> str: - """Generate PLX content for testing a specific function.""" + def make_mthds_content(function_name: str) -> str: + """Generate MTHDS content for testing a specific function.""" return f""" domain = "test_pipe_func_validation" description = "Test bundle for pipe_func validation error reporting" @@ -33,7 +33,7 @@ def make_plx_content(function_name: str) -> str: output = "Text" """ - PLX_CONTENT_WITH_PIPE_FUNC: ClassVar[str] = """ + MTHDS_CONTENT_WITH_PIPE_FUNC: ClassVar[str] = """ domain = "test_pipe_func_validation" description = "Test bundle for pipe_func validation error reporting" @@ -195,9 +195,9 @@ async def test_pipe_func_missing_return_type_reports_clear_error(self): with tempfile.TemporaryDirectory() as temp_dir: temp_path = Path(temp_dir) - # Create the .plx file - plx_file = temp_path / "test_bundle.plx" - plx_file.write_text(TestData.PLX_CONTENT_WITH_PIPE_FUNC) + # Create the .mthds file + mthds_file = temp_path / "test_bundle.mthds" + mthds_file.write_text(TestData.MTHDS_CONTENT_WITH_PIPE_FUNC) # Create the .py file with the function (missing return type) py_file = temp_path / "my_funcs.py" @@ -207,7 +207,7 @@ async def test_pipe_func_missing_return_type_reports_clear_error(self): # Currently raises LibraryError, but ValidateBundleError is also acceptable with pytest.raises((ValidateBundleError, LibraryError)) as exc_info: await validate_bundle( - plx_file_path=plx_file, + mthds_file_path=mthds_file, library_dirs=[temp_path], ) @@ -242,9 +242,9 @@ async def test_pipe_func_with_return_type_validates_successfully(self): with tempfile.TemporaryDirectory() as temp_dir: temp_path = Path(temp_dir) - # Create the .plx file - plx_file = temp_path / "test_bundle.plx" - plx_file.write_text(TestData.PLX_CONTENT_WITH_PIPE_FUNC) + # Create the .mthds file + mthds_file = temp_path / "test_bundle.mthds" + mthds_file.write_text(TestData.MTHDS_CONTENT_WITH_PIPE_FUNC) # Create the .py file with the function (WITH return type) py_file = temp_path / "my_funcs.py" @@ -252,7 +252,7 @@ async def test_pipe_func_with_return_type_validates_successfully(self): # Validate the bundle - should succeed result = await validate_bundle( - plx_file_path=plx_file, + mthds_file_path=mthds_file, library_dirs=[temp_path], ) @@ -277,14 +277,14 @@ async def test_pipe_func_decorated_but_ineligible_not_silently_ignored(self): py_file = temp_path / "my_funcs.py" py_file.write_text(TestData.FUNC_WITH_DECORATOR_NO_RETURN_TYPE) - # Create .plx file that references the function - plx_file = temp_path / "test_bundle.plx" - plx_file.write_text(TestData.PLX_CONTENT_WITH_PIPE_FUNC) + # Create .mthds file that references the function + mthds_file = temp_path / "test_bundle.mthds" + mthds_file.write_text(TestData.MTHDS_CONTENT_WITH_PIPE_FUNC) # Try to validate - should fail with informative error with pytest.raises((ValidateBundleError, LibraryError)) as exc_info: await validate_bundle( - plx_file_path=plx_file, + mthds_file_path=mthds_file, 
library_dirs=[temp_path], ) @@ -328,9 +328,9 @@ async def test_ineligible_function_returns_correct_error( with tempfile.TemporaryDirectory() as temp_dir: temp_path = Path(temp_dir) - # Create the .plx file referencing the function - plx_file = temp_path / "test_bundle.plx" - plx_file.write_text(TestData.make_plx_content(function_name)) + # Create the .mthds file referencing the function + mthds_file = temp_path / "test_bundle.mthds" + mthds_file.write_text(TestData.make_mthds_content(function_name)) # Create the .py file with the ineligible function py_file = temp_path / "my_funcs.py" @@ -339,7 +339,7 @@ async def test_ineligible_function_returns_correct_error( # Validate the bundle - should fail with a specific error message with pytest.raises((ValidateBundleError, LibraryError)) as exc_info: await validate_bundle( - plx_file_path=plx_file, + mthds_file_path=mthds_file, library_dirs=[temp_path], ) @@ -380,8 +380,8 @@ class MyStructuredContent(StructuredContent): async def func_wrong_structure_class(working_memory: WorkingMemory) -> MyStructuredContent: return MyStructuredContent(name="test") """ - # PLX file that expects Text output (which uses TextContent) - plx_content = """ + # MTHDS file that expects Text output (which uses TextContent) + mthds_content = """ domain = "test_pipe_func_validation" description = "Test bundle for pipe_func return type validation" @@ -394,9 +394,9 @@ async def func_wrong_structure_class(working_memory: WorkingMemory) -> MyStructu with tempfile.TemporaryDirectory() as temp_dir: temp_path = Path(temp_dir) - # Create the .plx file - plx_file = temp_path / "test_bundle.plx" - plx_file.write_text(plx_content) + # Create the .mthds file + mthds_file = temp_path / "test_bundle.mthds" + mthds_file.write_text(mthds_content) # Create the .py file with the function py_file = temp_path / "my_funcs.py" @@ -405,7 +405,7 @@ async def func_wrong_structure_class(working_memory: WorkingMemory) -> MyStructu # Validate the bundle - should fail because return type doesn't match concept's structure class with pytest.raises((ValidateBundleError, LibraryError, TypeError)) as exc_info: await validate_bundle( - plx_file_path=plx_file, + mthds_file_path=mthds_file, library_dirs=[temp_path], ) @@ -438,8 +438,8 @@ async def test_pipe_func_list_content_with_array_output_validates_successfully(s async def func_returns_list_content(working_memory: WorkingMemory) -> ListContent[TextContent]: return ListContent(items=[TextContent(text="test1"), TextContent(text="test2")]) """ - # PLX file with array output notation using built-in Text concept - plx_content = """ + # MTHDS file with array output notation using built-in Text concept + mthds_content = """ domain = "test_pipe_func_validation" description = "Test bundle for ListContent validation" @@ -452,9 +452,9 @@ async def func_returns_list_content(working_memory: WorkingMemory) -> ListConten with tempfile.TemporaryDirectory() as temp_dir: temp_path = Path(temp_dir) - # Create the .plx file - plx_file = temp_path / "test_bundle.plx" - plx_file.write_text(plx_content) + # Create the .mthds file + mthds_file = temp_path / "test_bundle.mthds" + mthds_file.write_text(mthds_content) # Create the .py file with the function py_file = temp_path / "my_funcs.py" @@ -462,7 +462,7 @@ async def func_returns_list_content(working_memory: WorkingMemory) -> ListConten # Validate the bundle - should succeed result = await validate_bundle( - plx_file_path=plx_file, + mthds_file_path=mthds_file, library_dirs=[temp_path], ) @@ -490,8 +490,8 @@ class 
WrongItem(StructuredContent): async def func_returns_wrong_list_content(working_memory: WorkingMemory) -> ListContent[WrongItem]: return ListContent(items=[WrongItem(different_field=42)]) """ - # PLX file expects Text[] (TextContent) but function returns ListContent[WrongItem] - plx_content = """ + # MTHDS file expects Text[] (TextContent) but function returns ListContent[WrongItem] + mthds_content = """ domain = "test_pipe_func_validation" description = "Test bundle for ListContent validation error" @@ -504,9 +504,9 @@ async def func_returns_wrong_list_content(working_memory: WorkingMemory) -> List with tempfile.TemporaryDirectory() as temp_dir: temp_path = Path(temp_dir) - # Create the .plx file - plx_file = temp_path / "test_bundle.plx" - plx_file.write_text(plx_content) + # Create the .mthds file + mthds_file = temp_path / "test_bundle.mthds" + mthds_file.write_text(mthds_content) # Create the .py file with the function py_file = temp_path / "my_funcs.py" @@ -515,7 +515,7 @@ async def func_returns_wrong_list_content(working_memory: WorkingMemory) -> List # Validate the bundle - should fail with clear error about item type mismatch with pytest.raises((ValidateBundleError, LibraryError, TypeError)) as exc_info: await validate_bundle( - plx_file_path=plx_file, + mthds_file_path=mthds_file, library_dirs=[temp_path], ) @@ -548,8 +548,8 @@ async def test_pipe_func_array_output_requires_list_content_return_type(self): async def func_returns_single_instead_of_list(working_memory: WorkingMemory) -> TextContent: return TextContent(text="single item - should be a list!") """ - # PLX file expects Text[] (array) but function returns single TextContent - plx_content = """ + # MTHDS file expects Text[] (array) but function returns single TextContent + mthds_content = """ domain = "test_pipe_func_validation" description = "Test bundle for ListContent requirement" @@ -562,9 +562,9 @@ async def func_returns_single_instead_of_list(working_memory: WorkingMemory) -> with tempfile.TemporaryDirectory() as temp_dir: temp_path = Path(temp_dir) - # Create the .plx file - plx_file = temp_path / "test_bundle.plx" - plx_file.write_text(plx_content) + # Create the .mthds file + mthds_file = temp_path / "test_bundle.mthds" + mthds_file.write_text(mthds_content) # Create the .py file with the function py_file = temp_path / "my_funcs.py" @@ -573,7 +573,7 @@ async def func_returns_single_instead_of_list(working_memory: WorkingMemory) -> # Validate the bundle - should fail because return type is not ListContent with pytest.raises((ValidateBundleError, LibraryError, TypeError)) as exc_info: await validate_bundle( - plx_file_path=plx_file, + mthds_file_path=mthds_file, library_dirs=[temp_path], ) diff --git a/tests/integration/pipelex/pipes/operator/pipe_llm/test_structures_basic.mthds b/tests/integration/pipelex/pipes/operator/pipe_llm/test_structures_basic.mthds new file mode 100644 index 000000000..9d5fadd83 --- /dev/null +++ b/tests/integration/pipelex/pipes/operator/pipe_llm/test_structures_basic.mthds @@ -0,0 +1,8 @@ +domain = "test_structured_generations" +description = "Concepts to test basic structures without unions" + +[concept] +ConceptWithSimpleStructure = "A simple structure" +ConceptWithOptionals = "A structure with optionals" +ConceptWithLists = "A structure with lists" +ConceptWithNestedStructures = "A structure with nested structures" diff --git a/tests/integration/pipelex/pipes/operator/pipe_llm/test_structures_basic.plx b/tests/integration/pipelex/pipes/operator/pipe_llm/test_structures_basic.plx 
deleted file mode 100644 index e5485fce8..000000000 --- a/tests/integration/pipelex/pipes/operator/pipe_llm/test_structures_basic.plx +++ /dev/null @@ -1,9 +0,0 @@ -domain = "test_structured_generations" -description = "Concepts to test basic structures without unions" - -[concept] -ConceptWithSimpleStructure = "A simple structure" -ConceptWithOptionals = "A structure with optionals" -ConceptWithLists = "A structure with lists" -ConceptWithNestedStructures = "A structure with nested structures" - diff --git a/tests/integration/pipelex/pipes/operator/pipe_llm/test_structures_complex.mthds b/tests/integration/pipelex/pipes/operator/pipe_llm/test_structures_complex.mthds new file mode 100644 index 000000000..8bb7eedef --- /dev/null +++ b/tests/integration/pipelex/pipes/operator/pipe_llm/test_structures_complex.mthds @@ -0,0 +1,8 @@ +domain = "test_structured_generations2" +description = "Concepts to test complex structures (dicts, unions, etc.)" + +[concept] +ConceptWithDicts = "A structure with dicts" +ConceptWithUnions = "A structure with union types" +ConceptWithComplexUnions = "A structure with more complex union combinations" +ConceptWithNestedUnions = "A structure with nested unions" diff --git a/tests/integration/pipelex/pipes/operator/pipe_llm/test_structures_complex.plx b/tests/integration/pipelex/pipes/operator/pipe_llm/test_structures_complex.plx deleted file mode 100644 index 8484412ea..000000000 --- a/tests/integration/pipelex/pipes/operator/pipe_llm/test_structures_complex.plx +++ /dev/null @@ -1,9 +0,0 @@ -domain = "test_structured_generations2" -description = "Concepts to test complex structures (dicts, unions, etc.)" - -[concept] -ConceptWithDicts = "A structure with dicts" -ConceptWithUnions = "A structure with union types" -ConceptWithComplexUnions = "A structure with more complex union combinations" -ConceptWithNestedUnions = "A structure with nested unions" - diff --git a/tests/integration/pipelex/pipes/pipelines/crazy_image_generation.plx b/tests/integration/pipelex/pipes/pipelines/crazy_image_generation.mthds similarity index 82% rename from tests/integration/pipelex/pipes/pipelines/crazy_image_generation.plx rename to tests/integration/pipelex/pipes/pipelines/crazy_image_generation.mthds index 1030ea53b..bc96852c9 100644 --- a/tests/integration/pipelex/pipes/pipelines/crazy_image_generation.plx +++ b/tests/integration/pipelex/pipes/pipelines/crazy_image_generation.mthds @@ -1,6 +1,6 @@ -domain = "crazy_image_generation" +domain = "crazy_image_generation" description = "Imagining and rendering absurd, funny images with unexpected surreal elements" -main_pipe = "generate_crazy_image" +main_pipe = "generate_crazy_image" [concept.ImagePrompt] description = """ @@ -15,8 +15,8 @@ Main pipeline that orchestrates the full crazy image generation flow - imagines """ output = "Image" steps = [ - { pipe = "imagine_scene", result = "image_prompt" }, - { pipe = "render_image", result = "crazy_image" }, + { pipe = "imagine_scene", result = "image_prompt" }, + { pipe = "render_image", result = "crazy_image" }, ] [pipe.imagine_scene] @@ -34,9 +34,9 @@ Generate a creative, absurd, and funny image concept. 
Combine unexpected element """ [pipe.render_image] -type = "PipeImgGen" +type = "PipeImgGen" description = "Generates the absurd image based on the creative scene description" -inputs = { image_prompt = "ImagePrompt" } -output = "Image" -prompt = "$image_prompt" -model = "@default-small" +inputs = { image_prompt = "ImagePrompt" } +output = "Image" +prompt = "$image_prompt" +model = "@default-small" diff --git a/tests/integration/pipelex/pipes/pipelines/failing_pipelines.plx b/tests/integration/pipelex/pipes/pipelines/failing_pipelines.mthds similarity index 77% rename from tests/integration/pipelex/pipes/pipelines/failing_pipelines.plx rename to tests/integration/pipelex/pipes/pipelines/failing_pipelines.mthds index b66b7e3ac..fce00adf8 100644 --- a/tests/integration/pipelex/pipes/pipelines/failing_pipelines.plx +++ b/tests/integration/pipelex/pipes/pipelines/failing_pipelines.mthds @@ -17,7 +17,6 @@ type = "PipeSequence" description = "This pipe will cause an infinite loop" output = "Text" steps = [ - { pipe = "dummy", result = "dummy_result" }, - { pipe = "infinite_loop_1", result = "disaster" }, + { pipe = "dummy", result = "dummy_result" }, + { pipe = "infinite_loop_1", result = "disaster" }, ] - diff --git a/tests/integration/pipelex/pipes/pipelines/flows.plx b/tests/integration/pipelex/pipes/pipelines/flows.mthds similarity index 86% rename from tests/integration/pipelex/pipes/pipelines/flows.plx rename to tests/integration/pipelex/pipes/pipelines/flows.mthds index 05073ec7b..a0fbeae3a 100644 --- a/tests/integration/pipelex/pipes/pipelines/flows.plx +++ b/tests/integration/pipelex/pipes/pipelines/flows.mthds @@ -1,6 +1,6 @@ -domain = "flows" +domain = "flows" description = "A collection of pipes that are used to test the flow of a pipeline" [concept] @@ -22,8 +22,8 @@ description = "Sequence for parallel test" inputs = { color = "Color" } output = "Color" steps = [ - { pipe = "capitalize_color", result = "capitalized_color" }, - { pipe = "capitalize_last_letter", result = "capitalized_last_letter" }, + { pipe = "capitalize_color", result = "capitalized_color" }, + { pipe = "capitalize_last_letter", result = "capitalized_last_letter" }, ] [pipe.capitalize_color] @@ -53,4 +53,3 @@ Here is the word: Output only the word, nothing else. """ - diff --git a/tests/integration/pipelex/pipes/pipelines/multiple_images_input_to_llm.plx b/tests/integration/pipelex/pipes/pipelines/multiple_images_input_to_llm.mthds similarity index 95% rename from tests/integration/pipelex/pipes/pipelines/multiple_images_input_to_llm.plx rename to tests/integration/pipelex/pipes/pipelines/multiple_images_input_to_llm.mthds index f8d718ea9..c4939d968 100644 --- a/tests/integration/pipelex/pipes/pipelines/multiple_images_input_to_llm.plx +++ b/tests/integration/pipelex/pipes/pipelines/multiple_images_input_to_llm.mthds @@ -1,4 +1,4 @@ -domain = "test_multiple_images_input_to_llm" +domain = "test_multiple_images_input_to_llm" description = "Test pipeline that takes multiple images as input to a PipeLLM." 
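For callers, these .plx-to-.mthds renames mean a bundle is now addressed by its .mthds path. A minimal sketch mirroring the validation tests earlier in this diff; the import path of validate_bundle is an assumption, since it is not visible here:

    import asyncio
    from pathlib import Path

    from pipelex import validate_bundle  # import path assumed, not shown in this diff

    bundle = Path("tests/integration/pipelex/pipes/pipelines/flows.mthds")
    # validate_bundle is awaited in the tests above, so it is async
    result = asyncio.run(validate_bundle(mthds_file_path=bundle, library_dirs=[bundle.parent]))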
[concept] diff --git a/tests/integration/pipelex/pipes/pipelines/multiplicity.plx b/tests/integration/pipelex/pipes/pipelines/multiplicity.mthds similarity index 77% rename from tests/integration/pipelex/pipes/pipelines/multiplicity.plx rename to tests/integration/pipelex/pipes/pipelines/multiplicity.mthds index 1a21c5fc4..a7a86670d 100644 --- a/tests/integration/pipelex/pipes/pipelines/multiplicity.plx +++ b/tests/integration/pipelex/pipes/pipelines/multiplicity.mthds @@ -1,12 +1,12 @@ -domain = "test_multiplicity" +domain = "test_multiplicity" description = "Test library about multiplicity" [concept] -Color = "A color" +Color = "A color" ProductOfNature = "Something produced by Nature" -FantasyScene = "A fantasy scene" +FantasyScene = "A fantasy scene" [pipe.original_power_ranger_colors] type = "PipeLLM" @@ -55,9 +55,9 @@ type = "PipeSequence" description = "Imagine nature scenes of Power Rangers colors" output = "ImgGenPrompt" steps = [ - { pipe = "original_power_ranger_colors", result = "color" }, - { pipe = "imagine_nature_product", result = "product_of_nature" }, - { pipe = "imagine_fantasy_scene_including_products_of_nature" }, + { pipe = "original_power_ranger_colors", result = "color" }, + { pipe = "imagine_nature_product", result = "product_of_nature" }, + { pipe = "imagine_fantasy_scene_including_products_of_nature" }, ] [pipe.imagine_nature_scene_of_alltime_power_rangers_colors] @@ -65,8 +65,7 @@ type = "PipeSequence" description = "Imagine nature scenes of Power Rangers colors" output = "ImgGenPrompt" steps = [ - { pipe = "alltime_power_ranger_colors", result = "color" }, - { pipe = "imagine_nature_product", result = "product_of_nature" }, - { pipe = "imagine_fantasy_scene_including_products_of_nature" }, + { pipe = "alltime_power_ranger_colors", result = "color" }, + { pipe = "imagine_nature_product", result = "product_of_nature" }, + { pipe = "imagine_fantasy_scene_including_products_of_nature" }, ] - diff --git a/tests/integration/pipelex/pipes/pipelines/refined_concepts.plx b/tests/integration/pipelex/pipes/pipelines/refined_concepts.mthds similarity index 64% rename from tests/integration/pipelex/pipes/pipelines/refined_concepts.plx rename to tests/integration/pipelex/pipes/pipelines/refined_concepts.mthds index f8cefd983..51a9c29a2 100644 --- a/tests/integration/pipelex/pipes/pipelines/refined_concepts.plx +++ b/tests/integration/pipelex/pipes/pipelines/refined_concepts.mthds @@ -1,10 +1,10 @@ -domain = "refined_concepts_test" +domain = "refined_concepts_test" description = "Test library for refined concept inputs" [concept.Photo] description = "A photograph" -refines = "Image" +refines = "Image" [concept.Report] description = "A report document" -refines = "Document" +refines = "Document" diff --git a/tests/integration/pipelex/pipes/pipelines/test_image_inputs.plx b/tests/integration/pipelex/pipes/pipelines/test_image_inputs.mthds similarity index 95% rename from tests/integration/pipelex/pipes/pipelines/test_image_inputs.plx rename to tests/integration/pipelex/pipes/pipelines/test_image_inputs.mthds index deffbc8c9..a7f125022 100644 --- a/tests/integration/pipelex/pipes/pipelines/test_image_inputs.plx +++ b/tests/integration/pipelex/pipes/pipelines/test_image_inputs.mthds @@ -1,4 +1,4 @@ -domain = "test_image_inputs" +domain = "test_image_inputs" description = "Test domain for verifying image input functionality" [concept] @@ -34,4 +34,3 @@ Extract the year and title. 
Also, add this to the description: $page.text_and_images.text.text """ - diff --git a/tests/integration/pipelex/pipes/pipelines/test_image_out_in.plx b/tests/integration/pipelex/pipes/pipelines/test_image_out_in.mthds similarity index 63% rename from tests/integration/pipelex/pipes/pipelines/test_image_out_in.plx rename to tests/integration/pipelex/pipes/pipelines/test_image_out_in.mthds index 5ca238da6..97142e80b 100644 --- a/tests/integration/pipelex/pipes/pipelines/test_image_out_in.plx +++ b/tests/integration/pipelex/pipes/pipelines/test_image_out_in.mthds @@ -1,15 +1,15 @@ -domain = "test_image_in_out" +domain = "test_image_in_out" description = "Test domain for verifying image output / input support" -main_pipe = "image_out_in" +main_pipe = "image_out_in" [pipe] [pipe.generate_image] -type = "PipeImgGen" +type = "PipeImgGen" description = "Generate an image from a prompt" -output = "Image" -prompt = "A beautiful landscape" -model = "$gen-image-testing" +output = "Image" +prompt = "A beautiful landscape" +model = "$gen-image-testing" [pipe.describe_image] type = "PipeLLM" @@ -27,6 +27,6 @@ type = "PipeSequence" description = "Generate an image from a prompt and describe it" output = "Text" steps = [ - { pipe = "generate_image", result = "image" }, - { pipe = "describe_image", result = "visual_description" }, -] \ No newline at end of file + { pipe = "generate_image", result = "image" }, + { pipe = "describe_image", result = "visual_description" }, +] diff --git a/tests/integration/pipelex/pipes/pipelines/tests.plx b/tests/integration/pipelex/pipes/pipelines/tests.mthds similarity index 93% rename from tests/integration/pipelex/pipes/pipelines/tests.plx rename to tests/integration/pipelex/pipes/pipelines/tests.mthds index 7a24c943a..0f939f0fc 100644 --- a/tests/integration/pipelex/pipes/pipelines/tests.plx +++ b/tests/integration/pipelex/pipes/pipelines/tests.mthds @@ -1,12 +1,12 @@ -domain = "tests" +domain = "tests" description = "This library is intended for testing purposes" [concept] FictionCharacter = "A character in a fiction story" ArticleAndCritic = "An article and a critique of it" -Complex = "A complex object" +Complex = "A complex object" [pipe.simple_llm_test_from_image] type = "PipeLLM" @@ -35,4 +35,3 @@ model = { model = "gpt-4o-mini", temperature = 0.5, max_tokens = 1000 } prompt = """ Tell me a short story about a red baloon. 
""" - diff --git a/tests/integration/pipelex/pipes/test_bracket_notation_controllers.py b/tests/integration/pipelex/pipes/test_bracket_notation_controllers.py index ed128ce67..dee8c2082 100644 --- a/tests/integration/pipelex/pipes/test_bracket_notation_controllers.py +++ b/tests/integration/pipelex/pipes/test_bracket_notation_controllers.py @@ -39,7 +39,7 @@ def test_pipe_parallel_with_bracket_notation(self, load_empty_library: Callable[ description="Process items in parallel", inputs={"data": "DataItem[2]"}, output="ProcessedData", - parallels=[], + branches=[], add_each_output=True, ) diff --git a/tests/unit/pipelex/builder/pipe/pipe_controller/pipe_parallel/test_data.py b/tests/unit/pipelex/builder/pipe/pipe_controller/pipe_parallel/test_data.py index 97d9a0696..12a3c424b 100644 --- a/tests/unit/pipelex/builder/pipe/pipe_controller/pipe_parallel/test_data.py +++ b/tests/unit/pipelex/builder/pipe/pipe_controller/pipe_parallel/test_data.py @@ -14,7 +14,7 @@ class PipeParallelTestCases: description="Run pipes in parallel", inputs={"data": "Data"}, output="Results", - parallels=[ + branches=[ SubPipeSpec(pipe_code="analyze_data", result="analysis"), SubPipeSpec(pipe_code="transform_data", result="transformed"), SubPipeSpec(pipe_code="validate_data", result="validation"), @@ -25,7 +25,7 @@ class PipeParallelTestCases: description="Run pipes in parallel", inputs={"data": "Data"}, output="Results", - parallels=[ + branches=[ SubPipeBlueprint(pipe="analyze_data", result="analysis"), SubPipeBlueprint(pipe="transform_data", result="transformed"), SubPipeBlueprint(pipe="validate_data", result="validation"), @@ -43,7 +43,7 @@ class PipeParallelTestCases: description="Parallel with combined output", inputs={"input": "Input"}, output="CombinedResult", - parallels=[ + branches=[ SubPipeSpec(pipe_code="pipe1", result="result1"), SubPipeSpec(pipe_code="pipe2", result="result2"), ], @@ -54,7 +54,7 @@ class PipeParallelTestCases: description="Parallel with combined output", inputs={"input": "Input"}, output="CombinedResult", - parallels=[ + branches=[ SubPipeBlueprint(pipe="pipe1", result="result1"), SubPipeBlueprint(pipe="pipe2", result="result2"), ], @@ -71,7 +71,7 @@ class PipeParallelTestCases: description="Parallel with combined output", inputs={"input": "Input"}, output="CombinedResult", - parallels=[ + branches=[ SubPipeSpec(pipe_code="pipe1", result="result1"), SubPipeSpec(pipe_code="pipe2", result="result2"), ], @@ -82,7 +82,7 @@ class PipeParallelTestCases: description="Parallel with combined output", inputs={"input": "Input"}, output="CombinedResult", - parallels=[ + branches=[ SubPipeBlueprint(pipe="pipe1", result="result1"), SubPipeBlueprint(pipe="pipe2", result="result2"), ], diff --git a/tests/unit/pipelex/builder/test_builder_manifest_generation.py b/tests/unit/pipelex/builder/test_builder_manifest_generation.py new file mode 100644 index 000000000..9f8cf6438 --- /dev/null +++ b/tests/unit/pipelex/builder/test_builder_manifest_generation.py @@ -0,0 +1,68 @@ +import shutil +from pathlib import Path + +from pipelex.builder.builder_loop import maybe_generate_manifest_for_output +from pipelex.core.packages.discovery import MANIFEST_FILENAME +from pipelex.core.packages.manifest_parser import parse_methods_toml + +# Path to the physical test data +PACKAGES_DATA_DIR = Path(__file__).resolve().parent.parent.parent.parent / "data" / "packages" + + +class TestBuilderManifestGeneration: + """Tests for post-build METHODS.toml generation.""" + + def test_multiple_domains_generates_manifest(self, 
tmp_path: Path) -> None: + """Output dir with multiple domains -> METHODS.toml generated.""" + # Copy two .mthds files with different domains + shutil.copy(PACKAGES_DATA_DIR / "legal_tools" / "legal" / "contracts.mthds", tmp_path / "contracts.mthds") + shutil.copy(PACKAGES_DATA_DIR / "legal_tools" / "scoring" / "scoring.mthds", tmp_path / "scoring.mthds") + + result = maybe_generate_manifest_for_output(output_dir=tmp_path) + + assert result is not None + manifest_path = tmp_path / MANIFEST_FILENAME + assert manifest_path.exists() + + content = manifest_path.read_text(encoding="utf-8") + manifest = parse_methods_toml(content) + assert manifest.version == "0.1.0" + assert len(manifest.exports) >= 2 + + # Check that main_pipe entries are exported + exported_pipes: list[str] = [] + for domain_export in manifest.exports: + exported_pipes.extend(domain_export.pipes) + assert "pkg_test_extract_clause" in exported_pipes + assert "pkg_test_compute_weighted_score" in exported_pipes + + def test_single_domain_no_manifest(self, tmp_path: Path) -> None: + """Output dir with single domain -> no METHODS.toml generated.""" + shutil.copy(PACKAGES_DATA_DIR / "minimal_package" / "core.mthds", tmp_path / "core.mthds") + + result = maybe_generate_manifest_for_output(output_dir=tmp_path) + + assert result is None + manifest_path = tmp_path / MANIFEST_FILENAME + assert not manifest_path.exists() + + def test_exported_pipes_include_main_pipe(self, tmp_path: Path) -> None: + """Exported pipes include main_pipe entries from each bundle.""" + shutil.copy(PACKAGES_DATA_DIR / "legal_tools" / "legal" / "contracts.mthds", tmp_path / "contracts.mthds") + shutil.copy(PACKAGES_DATA_DIR / "legal_tools" / "scoring" / "scoring.mthds", tmp_path / "scoring.mthds") + + maybe_generate_manifest_for_output(output_dir=tmp_path) + + manifest_path = tmp_path / MANIFEST_FILENAME + content = manifest_path.read_text(encoding="utf-8") + manifest = parse_methods_toml(content) + + # Build a lookup of domain -> pipes + domain_pipes: dict[str, list[str]] = {} + for domain_export in manifest.exports: + domain_pipes[domain_export.domain_path] = domain_export.pipes + + # contracts.mthds has main_pipe = "pkg_test_extract_clause" + assert "pkg_test_extract_clause" in domain_pipes.get("pkg_test_legal.contracts", []) + # scoring.mthds has main_pipe = "pkg_test_compute_weighted_score" + assert "pkg_test_compute_weighted_score" in domain_pipes.get("pkg_test_scoring", []) diff --git a/tests/unit/pipelex/builder/test_runner_generator.py b/tests/unit/pipelex/builder/test_runner_generator.py index 91bcff2e1..a0c69103e 100644 --- a/tests/unit/pipelex/builder/test_runner_generator.py +++ b/tests/unit/pipelex/builder/test_runner_generator.py @@ -225,7 +225,7 @@ def test_runner_code_includes_imports(self, mock_pipe_single_output: MagicMock) runner_code = generate_runner_code(mock_pipe_single_output) assert "import asyncio" in runner_code assert "from pipelex.pipelex import Pipelex" in runner_code - assert "from pipelex.pipeline.execute import execute_pipeline" in runner_code + assert "from pipelex.pipeline.runner import PipelexRunner" in runner_code def test_runner_code_includes_structure_imports(self, mock_pipe_single_output: MagicMock) -> None: """Test that generated runner code includes structure class imports.""" diff --git a/tests/unit/pipelex/cli/test_agent_graph_cmd.py b/tests/unit/pipelex/cli/test_agent_graph_cmd.py index a60d2171e..7c82d9188 100644 --- a/tests/unit/pipelex/cli/test_agent_graph_cmd.py +++ 
b/tests/unit/pipelex/cli/test_agent_graph_cmd.py @@ -14,13 +14,13 @@ from pytest_mock import MockerFixture from pipelex.cli.agent_cli.commands.graph_cmd import GraphFormat, graph_cmd -from pipelex.core.interpreter.exceptions import PLXDecodeError +from pipelex.core.interpreter.exceptions import MthdsDecodeError GRAPH_CMD_MODULE = "pipelex.cli.agent_cli.commands.graph_cmd" class TestGraphCmd: - """Tests for the graph command that generates HTML from a .plx bundle.""" + """Tests for the graph command that generates HTML from a .mthds bundle.""" def _mock_blueprint(self, mocker: MockerFixture, *, main_pipe: str = "my_pipe") -> None: """Mock bundle parsing to return a blueprint with the given main_pipe.""" @@ -32,7 +32,7 @@ def _mock_blueprint(self, mocker: MockerFixture, *, main_pipe: str = "my_pipe") ) def _mock_execution(self, mocker: MockerFixture, *, graph_spec_present: bool = True) -> None: - """Mock the Pipelex init, execute_pipeline, graph generation, and teardown.""" + """Mock the Pipelex init, PipelexRunner, graph generation, and teardown.""" mocker.patch(f"{GRAPH_CMD_MODULE}.make_pipelex_for_agent_cli") mocker.patch(f"{GRAPH_CMD_MODULE}.Pipelex.teardown_if_needed") @@ -44,35 +44,37 @@ def _mock_execution(self, mocker: MockerFixture, *, graph_spec_present: bool = T mock_pipe_output.graph_spec = mocker.MagicMock() else: mock_pipe_output.graph_spec = None + mock_response = mocker.MagicMock() + mock_response.pipe_output = mock_pipe_output mock_graph_outputs = mocker.MagicMock() # Patch async functions with non-async mocks so no coroutines are created (avoids "coroutine never awaited" warnings) - mocker.patch(f"{GRAPH_CMD_MODULE}.execute_pipeline", new=mocker.MagicMock()) + mocker.patch(f"{GRAPH_CMD_MODULE}.PipelexRunner") mocker.patch(f"{GRAPH_CMD_MODULE}.generate_graph_outputs", new=mocker.MagicMock()) - # asyncio.run is called twice: first for execute_pipeline, then for generate_graph_outputs - mocker.patch(f"{GRAPH_CMD_MODULE}.asyncio.run", side_effect=[mock_pipe_output, mock_graph_outputs]) + # asyncio.run is called twice: first for runner.execute_pipeline, then for generate_graph_outputs + mocker.patch(f"{GRAPH_CMD_MODULE}.asyncio.run", side_effect=[mock_response, mock_graph_outputs]) mocker.patch( f"{GRAPH_CMD_MODULE}.save_graph_outputs_to_dir", return_value={"reactflow_html": Path("graph/reactflow.html")}, ) - def test_valid_plx_file_produces_success_json( + def test_valid_mthds_file_produces_success_json( self, mocker: MockerFixture, capsys: pytest.CaptureFixture[str], tmp_path: Path, ) -> None: - """Valid .plx file should produce success JSON with pipe_code and output_dir.""" - plx_file = tmp_path / "bundle.plx" - plx_file.write_text('[bundle]\nmain_pipe = "my_pipe"\n[domain]\ncode = "test"') + """Valid .mthds file should produce success JSON with pipe_code and output_dir.""" + mthds_file = tmp_path / "bundle.mthds" + mthds_file.write_text('[bundle]\nmain_pipe = "my_pipe"\n[domain]\ncode = "test"') self._mock_blueprint(mocker) self._mock_execution(mocker) - graph_cmd(target=str(plx_file)) + graph_cmd(target=str(mthds_file)) parsed = json.loads(capsys.readouterr().out) assert parsed["success"] is True @@ -80,14 +82,14 @@ def test_valid_plx_file_produces_success_json( assert "output_dir" in parsed assert "files" in parsed - def test_valid_plx_file_calls_asyncio_run_twice( + def test_valid_mthds_file_calls_asyncio_run_twice( self, mocker: MockerFixture, tmp_path: Path, ) -> None: - """Valid .plx file should call asyncio.run twice (execute_pipeline + generate_graph_outputs).""" - 
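For context on the mock changes in this file: execute_pipeline is no longer a module-level coroutine but a method on a PipelexRunner instance, and the awaited result wraps the pipe output. A hedged sketch of the new call shape; the import is taken from the test_runner_generator.py assertion above, while the constructor and method arguments are assumptions the mocks do not pin down:

    import asyncio

    from pipelex.pipeline.runner import PipelexRunner

    runner = PipelexRunner()  # constructor arguments assumed
    response = asyncio.run(runner.execute_pipeline())  # first asyncio.run call
    graph_spec = response.pipe_output.graph_spec  # None when no graph was recorded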
plx_file = tmp_path / "bundle.plx" - plx_file.write_text('[bundle]\nmain_pipe = "my_pipe"\n[domain]\ncode = "test"') + """Valid .mthds file should call asyncio.run twice (execute_pipeline + generate_graph_outputs).""" + mthds_file = tmp_path / "bundle.mthds" + mthds_file.write_text('[bundle]\nmain_pipe = "my_pipe"\n[domain]\ncode = "test"') self._mock_blueprint(mocker) @@ -96,14 +98,16 @@ def test_valid_plx_file_calls_asyncio_run_twice( mocker.patch(f"{GRAPH_CMD_MODULE}.get_config") # Patch async functions with non-async mocks so no coroutines are created (avoids "coroutine never awaited" warnings) - mocker.patch(f"{GRAPH_CMD_MODULE}.execute_pipeline", new=mocker.MagicMock()) + mocker.patch(f"{GRAPH_CMD_MODULE}.PipelexRunner") mocker.patch(f"{GRAPH_CMD_MODULE}.generate_graph_outputs", new=mocker.MagicMock()) mock_pipe_output = mocker.MagicMock() mock_pipe_output.graph_spec = mocker.MagicMock() + mock_response = mocker.MagicMock() + mock_response.pipe_output = mock_pipe_output mock_asyncio_run = mocker.patch( f"{GRAPH_CMD_MODULE}.asyncio.run", - side_effect=[mock_pipe_output, mocker.MagicMock()], + side_effect=[mock_response, mocker.MagicMock()], ) mocker.patch( @@ -111,16 +115,16 @@ def test_valid_plx_file_calls_asyncio_run_twice( return_value={"reactflow_html": Path("graph/reactflow.html")}, ) - graph_cmd(target=str(plx_file)) + graph_cmd(target=str(mthds_file)) assert mock_asyncio_run.call_count == 2 - def test_non_plx_file_produces_error( + def test_non_mthds_file_produces_error( self, capsys: pytest.CaptureFixture[str], tmp_path: Path, ) -> None: - """Non-PLX file (e.g. .json, .txt) should produce an ArgumentError.""" + """Non-MTHDS file (e.g. .json, .txt) should produce an ArgumentError.""" json_file = tmp_path / "graphspec.json" json_file.write_text("{}") @@ -131,7 +135,7 @@ def test_non_plx_file_produces_error( parsed = json.loads(capsys.readouterr().err) assert parsed["error"] is True assert parsed["error_type"] == "ArgumentError" - assert ".plx" in parsed["message"] + assert ".mthds" in parsed["message"] def test_file_not_found_produces_error( self, @@ -139,7 +143,7 @@ def test_file_not_found_produces_error( tmp_path: Path, ) -> None: """Missing file should produce a FileNotFoundError.""" - missing = tmp_path / "nonexistent.plx" + missing = tmp_path / "nonexistent.mthds" with pytest.raises(typer.Exit) as exc_info: graph_cmd(target=str(missing)) @@ -156,8 +160,8 @@ def test_bundle_without_main_pipe_produces_error( tmp_path: Path, ) -> None: """Bundle that doesn't declare main_pipe should produce a BundleError.""" - plx_file = tmp_path / "bundle.plx" - plx_file.write_text('[domain]\ncode = "test"') + mthds_file = tmp_path / "bundle.mthds" + mthds_file.write_text('[domain]\ncode = "test"') mock_blueprint = mocker.MagicMock() mock_blueprint.main_pipe = None @@ -167,7 +171,7 @@ def test_bundle_without_main_pipe_produces_error( ) with pytest.raises(typer.Exit) as exc_info: - graph_cmd(target=str(plx_file)) + graph_cmd(target=str(mthds_file)) assert exc_info.value.exit_code == 1 parsed = json.loads(capsys.readouterr().err) @@ -182,8 +186,8 @@ def test_no_graph_spec_produces_error( tmp_path: Path, ) -> None: """If pipe_output.graph_spec is None, should produce a GraphSpecMissingError.""" - plx_file = tmp_path / "bundle.plx" - plx_file.write_text('[bundle]\nmain_pipe = "my_pipe"\n[domain]\ncode = "test"') + mthds_file = tmp_path / "bundle.mthds" + mthds_file.write_text('[bundle]\nmain_pipe = "my_pipe"\n[domain]\ncode = "test"') self._mock_blueprint(mocker) @@ -192,14 +196,16 @@ def 
test_no_graph_spec_produces_error( mocker.patch(f"{GRAPH_CMD_MODULE}.get_config") # Patch async function with non-async mock so no coroutine is created (avoids "coroutine never awaited" warning) - mocker.patch(f"{GRAPH_CMD_MODULE}.execute_pipeline", new=mocker.MagicMock()) + mocker.patch(f"{GRAPH_CMD_MODULE}.PipelexRunner") mock_pipe_output = mocker.MagicMock() mock_pipe_output.graph_spec = None - mocker.patch(f"{GRAPH_CMD_MODULE}.asyncio.run", return_value=mock_pipe_output) + mock_response = mocker.MagicMock() + mock_response.pipe_output = mock_pipe_output + mocker.patch(f"{GRAPH_CMD_MODULE}.asyncio.run", return_value=mock_response) with pytest.raises(typer.Exit) as exc_info: - graph_cmd(target=str(plx_file)) + graph_cmd(target=str(mthds_file)) assert exc_info.value.exit_code == 1 parsed = json.loads(capsys.readouterr().err) @@ -222,13 +228,13 @@ def test_format_option_produces_success( format_option: GraphFormat, ) -> None: """Each format option should produce success JSON.""" - plx_file = tmp_path / "bundle.plx" - plx_file.write_text('[bundle]\nmain_pipe = "my_pipe"\n[domain]\ncode = "test"') + mthds_file = tmp_path / "bundle.mthds" + mthds_file.write_text('[bundle]\nmain_pipe = "my_pipe"\n[domain]\ncode = "test"') self._mock_blueprint(mocker) self._mock_execution(mocker) - graph_cmd(target=str(plx_file), graph_format=format_option) + graph_cmd(target=str(mthds_file), graph_format=format_option) parsed = json.loads(capsys.readouterr().out) assert parsed["success"] is True @@ -239,25 +245,25 @@ def test_default_format_is_reactflow(self) -> None: default = sig.parameters["graph_format"].default assert default == GraphFormat.REACTFLOW - def test_plx_parse_error_produces_error( + def test_mthds_parse_error_produces_error( self, mocker: MockerFixture, capsys: pytest.CaptureFixture[str], tmp_path: Path, ) -> None: - """PLX parse error should produce a PLXDecodeError.""" - plx_file = tmp_path / "bundle.plx" - plx_file.write_text("invalid toml {{{{") + """MTHDS parse error should produce a MthdsDecodeError.""" + mthds_file = tmp_path / "bundle.mthds" + mthds_file.write_text("invalid toml {{{{") mocker.patch( f"{GRAPH_CMD_MODULE}.PipelexInterpreter.make_pipelex_bundle_blueprint", - side_effect=PLXDecodeError(message="bad toml", doc="invalid toml {{{{", pos=0, lineno=1, colno=1), + side_effect=MthdsDecodeError(message="bad toml", doc="invalid toml {{{{", pos=0, lineno=1, colno=1), ) with pytest.raises(typer.Exit) as exc_info: - graph_cmd(target=str(plx_file)) + graph_cmd(target=str(mthds_file)) assert exc_info.value.exit_code == 1 parsed = json.loads(capsys.readouterr().err) assert parsed["error"] is True - assert parsed["error_type"] == "PLXDecodeError" + assert parsed["error_type"] == "MthdsDecodeError" diff --git a/tests/unit/pipelex/cli/test_pkg_add.py b/tests/unit/pipelex/cli/test_pkg_add.py new file mode 100644 index 000000000..948062f99 --- /dev/null +++ b/tests/unit/pipelex/cli/test_pkg_add.py @@ -0,0 +1,119 @@ +import shutil +from pathlib import Path + +import pytest +from click.exceptions import Exit + +from pipelex.cli.commands.pkg.add_cmd import derive_alias_from_address, do_pkg_add +from pipelex.core.packages.discovery import MANIFEST_FILENAME +from pipelex.core.packages.manifest_parser import parse_methods_toml + +# Path to the physical test data +PACKAGES_DATA_DIR = Path(__file__).resolve().parent.parent.parent.parent / "data" / "packages" + + +class TestPkgAdd: + """Tests for pipelex pkg add command logic.""" + + def test_add_dependency_to_manifest(self, tmp_path: Path, 
monkeypatch: pytest.MonkeyPatch) -> None: + """Add a dependency to an existing METHODS.toml.""" + # Copy a minimal package + src = PACKAGES_DATA_DIR / "minimal_package" + shutil.copytree(src, tmp_path / "pkg") + pkg_dir = tmp_path / "pkg" + monkeypatch.chdir(pkg_dir) + + do_pkg_add( + address="github.com/org/scoring-lib", + alias="scoring_lib", + version="^2.0.0", + path="../scoring-lib", + ) + + content = (pkg_dir / MANIFEST_FILENAME).read_text(encoding="utf-8") + manifest = parse_methods_toml(content) + assert len(manifest.dependencies) == 1 + dep = manifest.dependencies[0] + assert dep.alias == "scoring_lib" + assert dep.address == "github.com/org/scoring-lib" + assert dep.version == "^2.0.0" + assert dep.path == "../scoring-lib" + + def test_add_dependency_without_path(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + """Add a dependency without local path.""" + src = PACKAGES_DATA_DIR / "minimal_package" + shutil.copytree(src, tmp_path / "pkg") + pkg_dir = tmp_path / "pkg" + monkeypatch.chdir(pkg_dir) + + do_pkg_add( + address="github.com/org/other-lib", + alias="other_lib", + version="1.0.0", + ) + + content = (pkg_dir / MANIFEST_FILENAME).read_text(encoding="utf-8") + manifest = parse_methods_toml(content) + assert len(manifest.dependencies) == 1 + assert manifest.dependencies[0].path is None + + def test_auto_derive_alias(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + """Alias should be auto-derived from address if not provided.""" + src = PACKAGES_DATA_DIR / "minimal_package" + shutil.copytree(src, tmp_path / "pkg") + pkg_dir = tmp_path / "pkg" + monkeypatch.chdir(pkg_dir) + + do_pkg_add( + address="github.com/org/scoring-lib", + version="1.0.0", + ) + + content = (pkg_dir / MANIFEST_FILENAME).read_text(encoding="utf-8") + manifest = parse_methods_toml(content) + assert len(manifest.dependencies) == 1 + assert manifest.dependencies[0].alias == "scoring_lib" + + def test_duplicate_alias_refuses(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + """Adding a dependency with duplicate alias refuses.""" + src = PACKAGES_DATA_DIR / "minimal_package" + shutil.copytree(src, tmp_path / "pkg") + pkg_dir = tmp_path / "pkg" + monkeypatch.chdir(pkg_dir) + + do_pkg_add( + address="github.com/org/first-lib", + alias="my_dep", + version="1.0.0", + ) + + with pytest.raises(Exit): + do_pkg_add( + address="github.com/org/second-lib", + alias="my_dep", + version="2.0.0", + ) + + def test_no_manifest_refuses(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + """Adding without existing METHODS.toml refuses.""" + monkeypatch.chdir(tmp_path) + + with pytest.raises(Exit): + do_pkg_add( + address="github.com/org/lib", + alias="my_lib", + version="1.0.0", + ) + + @pytest.mark.parametrize( + ("address", "expected_alias"), + [ + ("github.com/org/scoring-lib", "scoring_lib"), + ("github.com/org/my.package", "my_package"), + ("gitlab.com/team/simple", "simple"), + ("github.com/org/UPPERCASE", "uppercase"), + ], + ) + def test_derive_alias_from_address(self, address: str, expected_alias: str) -> None: + """Auto-derived alias from various address formats.""" + assert derive_alias_from_address(address) == expected_alias diff --git a/tests/unit/pipelex/cli/test_pkg_graph.py b/tests/unit/pipelex/cli/test_pkg_graph.py new file mode 100644 index 000000000..fc47552a3 --- /dev/null +++ b/tests/unit/pipelex/cli/test_pkg_graph.py @@ -0,0 +1,151 @@ +from pathlib import Path + +import pytest +from click.exceptions import Exit + +from
pipelex.cli.commands.pkg.graph_cmd import do_pkg_graph +from tests.unit.pipelex.core.packages.graph.test_data import ( + LEGAL_TOOLS_ADDRESS, + make_test_package_index, +) + + +def _mock_build_index(_project_root: Path): + """Return the shared test index regardless of project_root.""" + return make_test_package_index() + + +class TestPkgGraph: + """Tests for pipelex pkg graph command logic.""" + + def test_graph_no_options_exits(self) -> None: + """No --from, --to, or --check flags -> exit 1.""" + with pytest.raises(Exit): + do_pkg_graph() + + def test_graph_from_finds_pipes(self, monkeypatch: pytest.MonkeyPatch) -> None: + """--from __native__::native.Text finds pipes that accept Text.""" + monkeypatch.setattr( + "pipelex.cli.commands.pkg.graph_cmd.build_index_from_project", + _mock_build_index, + ) + + # Should not raise — pipes consuming Text exist in the test data + do_pkg_graph(from_concept="__native__::native.Text") + + def test_graph_to_finds_pipes(self, monkeypatch: pytest.MonkeyPatch) -> None: + """--to with a known concept finds producing pipes.""" + monkeypatch.setattr( + "pipelex.cli.commands.pkg.graph_cmd.build_index_from_project", + _mock_build_index, + ) + + # pkg_test_analyze_clause produces Text + do_pkg_graph(to_concept="__native__::native.Text") + + def test_graph_check_compatible(self, monkeypatch: pytest.MonkeyPatch) -> None: + """--check with compatible pipes shows compatible params.""" + monkeypatch.setattr( + "pipelex.cli.commands.pkg.graph_cmd.build_index_from_project", + _mock_build_index, + ) + + # extract_clause outputs PkgTestContractClause, analyze_clause accepts it + source_key = f"{LEGAL_TOOLS_ADDRESS}::pkg_test_extract_clause" + target_key = f"{LEGAL_TOOLS_ADDRESS}::pkg_test_analyze_clause" + + do_pkg_graph(check=f"{source_key},{target_key}") + + def test_graph_check_incompatible(self, monkeypatch: pytest.MonkeyPatch) -> None: + """--check with incompatible pipes shows yellow warning, no error.""" + monkeypatch.setattr( + "pipelex.cli.commands.pkg.graph_cmd.build_index_from_project", + _mock_build_index, + ) + + # analyze_clause: input=PkgTestContractClause, output=Text + # Checking analyze -> analyze: output Text does NOT match input PkgTestContractClause + source_key = f"{LEGAL_TOOLS_ADDRESS}::pkg_test_analyze_clause" + target_key = f"{LEGAL_TOOLS_ADDRESS}::pkg_test_analyze_clause" + + do_pkg_graph(check=f"{source_key},{target_key}") + + def test_graph_invalid_concept_format_exits(self, monkeypatch: pytest.MonkeyPatch) -> None: + """Bad concept format (missing ::) -> exit 1.""" + monkeypatch.setattr( + "pipelex.cli.commands.pkg.graph_cmd.build_index_from_project", + _mock_build_index, + ) + + with pytest.raises(Exit): + do_pkg_graph(from_concept="bad_format_no_separator") + + def test_graph_compose_without_from_to_exits(self) -> None: + """--compose without both --from and --to -> exit 1.""" + with pytest.raises(Exit): + do_pkg_graph(compose=True) + + with pytest.raises(Exit): + do_pkg_graph(from_concept="__native__::native.Text", compose=True) + + with pytest.raises(Exit): + do_pkg_graph(to_concept="__native__::native.Text", compose=True) + + def test_graph_compose_with_from_to_succeeds(self, monkeypatch: pytest.MonkeyPatch) -> None: + """--compose with --from and --to prints composition template without error.""" + monkeypatch.setattr( + "pipelex.cli.commands.pkg.graph_cmd.build_index_from_project", + _mock_build_index, + ) + + do_pkg_graph( + from_concept="__native__::native.Text",
to_concept=f"{LEGAL_TOOLS_ADDRESS}::pkg_test_legal.PkgTestContractClause", + compose=True, + ) + + @pytest.mark.parametrize( + "raw_concept", + [ + pytest.param("package::", id="empty_concept_ref"), + pytest.param("::concept", id="empty_package_address"), + pytest.param("::", id="both_empty"), + ], + ) + def test_graph_concept_id_empty_parts_exits(self, monkeypatch: pytest.MonkeyPatch, raw_concept: str) -> None: + """Concept IDs with empty package_address or concept_ref after splitting -> exit 1.""" + monkeypatch.setattr( + "pipelex.cli.commands.pkg.graph_cmd.build_index_from_project", + _mock_build_index, + ) + + with pytest.raises(Exit): + do_pkg_graph(from_concept=raw_concept) + + def test_graph_concept_id_multiple_separators_exits(self, monkeypatch: pytest.MonkeyPatch) -> None: + """Concept ID with multiple :: separators -> exit 1.""" + monkeypatch.setattr( + "pipelex.cli.commands.pkg.graph_cmd.build_index_from_project", + _mock_build_index, + ) + + with pytest.raises(Exit): + do_pkg_graph(from_concept="package::domain::Concept") + + @pytest.mark.parametrize( + "check_arg", + [ + pytest.param("pipe1,", id="empty_target"), + pytest.param(",pipe2", id="empty_source"), + pytest.param(",", id="both_empty"), + ], + ) + def test_graph_check_empty_pipe_key_exits(self, monkeypatch: pytest.MonkeyPatch, check_arg: str) -> None: + """--check with empty pipe key after comma split -> exit 1.""" + monkeypatch.setattr( + "pipelex.cli.commands.pkg.graph_cmd.build_index_from_project", + _mock_build_index, + ) + + with pytest.raises(Exit): + do_pkg_graph(check=check_arg) diff --git a/tests/unit/pipelex/cli/test_pkg_index.py b/tests/unit/pipelex/cli/test_pkg_index.py new file mode 100644 index 000000000..80f6ebfb2 --- /dev/null +++ b/tests/unit/pipelex/cli/test_pkg_index.py @@ -0,0 +1,44 @@ +import shutil +from pathlib import Path + +import pytest +from click.exceptions import Exit + +from pipelex.cli.commands.pkg.index_cmd import do_pkg_index +from pipelex.core.packages.index.models import PackageIndex + +PACKAGES_DATA_DIR = Path(__file__).resolve().parent.parent.parent.parent / "data" / "packages" + + +class TestPkgIndex: + """Tests for pipelex pkg index command logic.""" + + def test_index_project_with_manifest(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + """With valid package directory -> displays index table without error.""" + src_dir = PACKAGES_DATA_DIR / "legal_tools" + shutil.copytree(src_dir, tmp_path / "legal_tools") + + monkeypatch.chdir(tmp_path / "legal_tools") + + do_pkg_index() + + def test_index_empty_project_exits(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + """Empty directory with no METHODS.toml -> exit 1.""" + monkeypatch.chdir(tmp_path) + + with pytest.raises(Exit): + do_pkg_index() + + def test_index_cache_empty_exits(self, monkeypatch: pytest.MonkeyPatch) -> None: + """Monkeypatched build_index_from_cache returning empty index -> exit 1.""" + + def _empty_cache(_cache_root: Path | None = None) -> PackageIndex: + return PackageIndex() + + monkeypatch.setattr( + "pipelex.cli.commands.pkg.index_cmd.build_index_from_cache", + _empty_cache, + ) + + with pytest.raises(Exit): + do_pkg_index(cache=True) diff --git a/tests/unit/pipelex/cli/test_pkg_init.py b/tests/unit/pipelex/cli/test_pkg_init.py new file mode 100644 index 000000000..10eb0a5d6 --- /dev/null +++ b/tests/unit/pipelex/cli/test_pkg_init.py @@ -0,0 +1,90 @@ +import shutil +from pathlib import Path + +import pytest +from click.exceptions import Exit + +from 
pipelex.cli.commands.pkg.init_cmd import do_pkg_init +from pipelex.core.packages.discovery import MANIFEST_FILENAME +from pipelex.core.packages.manifest_parser import parse_methods_toml + +# Path to the physical test data +PACKAGES_DATA_DIR = Path(__file__).resolve().parent.parent.parent.parent / "data" / "packages" + + +class TestPkgInit: + """Tests for pipelex pkg init command logic.""" + + def test_generate_manifest_from_mthds_files(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + """With .mthds files in tmp dir -> generates valid METHODS.toml.""" + src = PACKAGES_DATA_DIR / "minimal_package" / "core.mthds" + shutil.copy(src, tmp_path / "core.mthds") + + monkeypatch.chdir(tmp_path) + + do_pkg_init(force=False) + + manifest_path = tmp_path / MANIFEST_FILENAME + assert manifest_path.exists() + + content = manifest_path.read_text(encoding="utf-8") + manifest = parse_methods_toml(content) + assert manifest.version == "0.1.0" + assert len(manifest.exports) >= 1 + + def test_existing_manifest_without_force_refuses(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + """Existing METHODS.toml without --force -> refuses.""" + src = PACKAGES_DATA_DIR / "minimal_package" / "core.mthds" + shutil.copy(src, tmp_path / "core.mthds") + (tmp_path / MANIFEST_FILENAME).write_text("[package]\n", encoding="utf-8") + + monkeypatch.chdir(tmp_path) + + with pytest.raises(Exit): + do_pkg_init(force=False) + + def test_existing_manifest_with_force_overwrites(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + """With --force -> overwrites existing METHODS.toml.""" + src = PACKAGES_DATA_DIR / "minimal_package" / "core.mthds" + shutil.copy(src, tmp_path / "core.mthds") + (tmp_path / MANIFEST_FILENAME).write_text("[package]\nold = true\n", encoding="utf-8") + + monkeypatch.chdir(tmp_path) + + do_pkg_init(force=True) + + content = (tmp_path / MANIFEST_FILENAME).read_text(encoding="utf-8") + assert "old" not in content + manifest = parse_methods_toml(content) + assert manifest.version == "0.1.0" + + def test_main_pipe_appears_first_in_exports(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + """main_pipe should appear first in domain exports, not buried alphabetically.""" + legal_tools_dir = PACKAGES_DATA_DIR / "legal_tools" + # Copy both .mthds files preserving subdirectory structure + for mthds_file in legal_tools_dir.rglob("*.mthds"): + rel = mthds_file.relative_to(legal_tools_dir) + dest = tmp_path / rel + dest.parent.mkdir(parents=True, exist_ok=True) + shutil.copy(mthds_file, dest) + + monkeypatch.chdir(tmp_path) + do_pkg_init(force=False) + + manifest_path = tmp_path / MANIFEST_FILENAME + manifest = parse_methods_toml(manifest_path.read_text(encoding="utf-8")) + + # Find the contracts domain + contracts_export = next( + (exp for exp in manifest.exports if exp.domain_path == "pkg_test_legal.contracts"), + None, + ) + assert contracts_export is not None, "Expected pkg_test_legal.contracts domain in exports" + assert contracts_export.pipes[0] == "pkg_test_extract_clause", f"main_pipe should be first in exports, got: {contracts_export.pipes}" + + def test_no_mthds_files_error(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + """No .mthds files -> error message.""" + monkeypatch.chdir(tmp_path) + + with pytest.raises(Exit): + do_pkg_init(force=False) diff --git a/tests/unit/pipelex/cli/test_pkg_inspect.py b/tests/unit/pipelex/cli/test_pkg_inspect.py new file mode 100644 index 000000000..479c41f3e --- /dev/null +++ 
b/tests/unit/pipelex/cli/test_pkg_inspect.py @@ -0,0 +1,37 @@ +import shutil +from pathlib import Path + +import pytest +from click.exceptions import Exit + +from pipelex.cli.commands.pkg.inspect_cmd import do_pkg_inspect + +PACKAGES_DATA_DIR = Path(__file__).resolve().parent.parent.parent.parent / "data" / "packages" + + +class TestPkgInspect: + """Tests for pipelex pkg inspect command logic.""" + + def test_inspect_existing_package(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + """Inspecting a known package address displays details without error.""" + src_dir = PACKAGES_DATA_DIR / "legal_tools" + shutil.copytree(src_dir, tmp_path / "legal_tools") + monkeypatch.chdir(tmp_path / "legal_tools") + + do_pkg_inspect(address="github.com/pipelexlab/legal-tools") + + def test_inspect_unknown_address_exits(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + """Inspecting a nonexistent address -> exit 1 with hint.""" + src_dir = PACKAGES_DATA_DIR / "legal_tools" + shutil.copytree(src_dir, tmp_path / "legal_tools") + monkeypatch.chdir(tmp_path / "legal_tools") + + with pytest.raises(Exit): + do_pkg_inspect(address="no/such/package") + + def test_inspect_empty_project_exits(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + """No packages in empty dir -> exit 1.""" + monkeypatch.chdir(tmp_path) + + with pytest.raises(Exit): + do_pkg_inspect(address="any/address") diff --git a/tests/unit/pipelex/cli/test_pkg_install.py b/tests/unit/pipelex/cli/test_pkg_install.py new file mode 100644 index 000000000..5709b77ed --- /dev/null +++ b/tests/unit/pipelex/cli/test_pkg_install.py @@ -0,0 +1,27 @@ +from pathlib import Path + +import pytest +from click.exceptions import Exit + +from pipelex.cli.commands.pkg.install_cmd import do_pkg_install +from pipelex.core.packages.lock_file import LOCK_FILENAME + + +class TestPkgInstall: + """Tests for pipelex pkg install command logic.""" + + def test_install_no_lock_exits(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + """No methods.lock -> Exit.""" + monkeypatch.chdir(tmp_path) + + with pytest.raises(Exit): + do_pkg_install() + + def test_install_empty_lock(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + """Empty lock file -> 'Nothing to install'.""" + monkeypatch.chdir(tmp_path) + lock_path = tmp_path / LOCK_FILENAME + lock_path.write_text("", encoding="utf-8") + + # Should not raise — prints "Nothing to install" + do_pkg_install() diff --git a/tests/unit/pipelex/cli/test_pkg_list.py b/tests/unit/pipelex/cli/test_pkg_list.py new file mode 100644 index 000000000..ffc2e7952 --- /dev/null +++ b/tests/unit/pipelex/cli/test_pkg_list.py @@ -0,0 +1,42 @@ +import shutil +from pathlib import Path + +import pytest +from click.exceptions import Exit + +from pipelex.cli.commands.pkg.list_cmd import do_pkg_list +from pipelex.core.packages.discovery import MANIFEST_FILENAME + +# Path to the physical test data +PACKAGES_DATA_DIR = Path(__file__).resolve().parent.parent.parent.parent / "data" / "packages" + + +class TestPkgList: + """Tests for pipelex pkg list command logic.""" + + def test_display_manifest_info(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + """With valid METHODS.toml -> displays info without error.""" + src_manifest = PACKAGES_DATA_DIR / "minimal_package" / MANIFEST_FILENAME + shutil.copy(src_manifest, tmp_path / MANIFEST_FILENAME) + + monkeypatch.chdir(tmp_path) + + # Should not raise — it prints to console but doesn't return anything + do_pkg_list() + + def
+    def test_no_manifest_found_error(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
+        """No METHODS.toml found -> error exit."""
+        monkeypatch.chdir(tmp_path)
+
+        with pytest.raises(Exit):
+            do_pkg_list()
+
+    def test_display_manifest_with_exports(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
+        """With full METHODS.toml including exports -> displays all sections."""
+        src_dir = PACKAGES_DATA_DIR / "legal_tools"
+        shutil.copytree(src_dir, tmp_path / "legal_tools")
+
+        monkeypatch.chdir(tmp_path / "legal_tools")
+
+        # Should not raise — it prints tables including exports
+        do_pkg_list()
diff --git a/tests/unit/pipelex/cli/test_pkg_lock.py b/tests/unit/pipelex/cli/test_pkg_lock.py
new file mode 100644
index 000000000..126daedc8
--- /dev/null
+++ b/tests/unit/pipelex/cli/test_pkg_lock.py
@@ -0,0 +1,53 @@
+import shutil
+from pathlib import Path
+
+import pytest
+from click.exceptions import Exit
+
+from pipelex.cli.commands.pkg.lock_cmd import do_pkg_lock
+from pipelex.core.packages.lock_file import LOCK_FILENAME
+
+PACKAGES_DATA_DIR = Path(__file__).resolve().parent.parent.parent.parent / "data" / "packages"
+
+
+class TestPkgLock:
+    """Tests for pipelex pkg lock command logic."""
+
+    def test_lock_no_manifest_exits(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
+        """No METHODS.toml -> Exit."""
+        monkeypatch.chdir(tmp_path)
+
+        with pytest.raises(Exit):
+            do_pkg_lock()
+
+    def test_lock_creates_methods_lock(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
+        """Manifest with no remote deps -> empty methods.lock."""
+        src = PACKAGES_DATA_DIR / "minimal_package"
+        shutil.copytree(src, tmp_path / "pkg")
+        pkg_dir = tmp_path / "pkg"
+        monkeypatch.chdir(pkg_dir)
+
+        do_pkg_lock()
+
+        lock_path = pkg_dir / LOCK_FILENAME
+        assert lock_path.exists()
+
+    def test_lock_with_local_dep_only(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
+        """Local path dep -> empty lock file (local deps excluded)."""
+        src = PACKAGES_DATA_DIR / "consumer_package"
+        shutil.copytree(src, tmp_path / "pkg")
+
+        # Also copy the scoring_dep directory so the local path resolves
+        scoring_src = PACKAGES_DATA_DIR / "scoring_dep"
+        shutil.copytree(scoring_src, tmp_path / "scoring_dep")
+
+        pkg_dir = tmp_path / "pkg"
+        monkeypatch.chdir(pkg_dir)
+
+        do_pkg_lock()
+
+        lock_path = pkg_dir / LOCK_FILENAME
+        assert lock_path.exists()
+        # Local deps are excluded from lock file
+        content = lock_path.read_text(encoding="utf-8")
+        assert "github.com/mthds/scoring-lib" not in content
diff --git a/tests/unit/pipelex/cli/test_pkg_publish.py b/tests/unit/pipelex/cli/test_pkg_publish.py
new file mode 100644
index 000000000..fb967ac2e
--- /dev/null
+++ b/tests/unit/pipelex/cli/test_pkg_publish.py
@@ -0,0 +1,191 @@
+import os
+import shutil
+import subprocess  # noqa: S404
+import textwrap
+from pathlib import Path
+
+import pytest
+from click.exceptions import Exit
+
+from pipelex.cli.commands.pkg.publish_cmd import do_pkg_publish
+from pipelex.core.packages.discovery import MANIFEST_FILENAME
+from pipelex.core.packages.publish_validation import PublishValidationResult, validate_for_publish
+
+PACKAGES_DATA_DIR = Path(__file__).resolve().parent.parent.parent.parent / "data" / "packages"
+
+_original_validate = validate_for_publish
+
+
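+# Stand-in for validate_for_publish, applied via monkeypatch below: it forwards to
+# the real validator with check_git=False, since these tests run outside a git repo.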
logic.""" + + def test_publish_no_manifest_exits(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + """Empty directory with no METHODS.toml -> exit 1.""" + monkeypatch.chdir(tmp_path) + + with pytest.raises(Exit): + do_pkg_publish() + + def test_publish_valid_package_succeeds(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + """legal_tools copy (with lock file stub) -> no exit.""" + src_dir = PACKAGES_DATA_DIR / "legal_tools" + pkg_dir = tmp_path / "legal_tools" + shutil.copytree(src_dir, pkg_dir) + + # Create a stub lock file so the remote-dep check passes + lock_content = textwrap.dedent("""\ + ["github.com/pipelexlab/scoring-lib"] + version = "2.0.0" + hash = "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + source = "https://github.com/pipelexlab/scoring-lib" + """) + (pkg_dir / "methods.lock").write_text(lock_content, encoding="utf-8") + + monkeypatch.setattr( + "pipelex.cli.commands.pkg.publish_cmd.validate_for_publish", + _validate_no_git, + ) + monkeypatch.chdir(pkg_dir) + + # Should not raise + do_pkg_publish() + + def test_publish_with_tag_creates_tag(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + """Init git repo + minimal_package (no remote deps) -> tag created.""" + src_dir = PACKAGES_DATA_DIR / "minimal_package" + pkg_dir = tmp_path / "minimal_package" + shutil.copytree(src_dir, pkg_dir) + + # Add authors and license to avoid warnings-only issues blocking tag + manifest_content = textwrap.dedent("""\ + [package] + address = "github.com/pipelexlab/minimal" + version = "0.1.0" + description = "A minimal MTHDS package" + authors = ["Test"] + license = "MIT" + """) + (pkg_dir / MANIFEST_FILENAME).write_text(manifest_content, encoding="utf-8") + + # Initialize a git repo so tagging works + subprocess.run(["git", "init"], cwd=pkg_dir, capture_output=True, check=True) # noqa: S607 + subprocess.run(["git", "add", "."], cwd=pkg_dir, capture_output=True, check=True) # noqa: S607 + subprocess.run( + ["git", "commit", "-m", "initial"], # noqa: S607 + cwd=pkg_dir, + capture_output=True, + check=True, + env={ + **os.environ, + "GIT_AUTHOR_NAME": "Test", + "GIT_AUTHOR_EMAIL": "test@test.com", + "GIT_COMMITTER_NAME": "Test", + "GIT_COMMITTER_EMAIL": "test@test.com", + "HOME": str(tmp_path), + }, + ) + + monkeypatch.chdir(pkg_dir) + + do_pkg_publish(tag=True) + + # Verify tag was created + result = subprocess.run( + ["git", "tag", "-l", "v0.1.0"], # noqa: S607 + cwd=pkg_dir, + capture_output=True, + text=True, + check=True, + ) + assert "v0.1.0" in result.stdout + + def test_publish_tag_does_not_reparse_manifest(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + """Tag creation uses version from validation result, not by re-reading METHODS.toml. + + Regression test: previously _create_git_tag re-parsed METHODS.toml, which could + raise unhandled ManifestParseError/ManifestValidationError if the file was + modified or corrupted between validation and tagging. 
+ """ + src_dir = PACKAGES_DATA_DIR / "minimal_package" + pkg_dir = tmp_path / "reparse_check" + shutil.copytree(src_dir, pkg_dir) + + manifest_content = textwrap.dedent("""\ + [package] + address = "github.com/pipelexlab/minimal" + version = "0.2.0" + description = "A minimal MTHDS package" + authors = ["Test"] + license = "MIT" + """) + (pkg_dir / MANIFEST_FILENAME).write_text(manifest_content, encoding="utf-8") + + # Initialize a git repo so tagging works + subprocess.run(["git", "init"], cwd=pkg_dir, capture_output=True, check=True) # noqa: S607 + subprocess.run(["git", "add", "."], cwd=pkg_dir, capture_output=True, check=True) # noqa: S607 + subprocess.run( + ["git", "commit", "-m", "initial"], # noqa: S607 + cwd=pkg_dir, + capture_output=True, + check=True, + env={ + **os.environ, + "GIT_AUTHOR_NAME": "Test", + "GIT_AUTHOR_EMAIL": "test@test.com", + "GIT_COMMITTER_NAME": "Test", + "GIT_COMMITTER_EMAIL": "test@test.com", + "HOME": str(tmp_path), + }, + ) + + monkeypatch.chdir(pkg_dir) + + # Delete METHODS.toml after validation would have parsed it. + # Old code re-read it here and would crash; new code uses the cached version. + original_validate = validate_for_publish + + def validate_then_delete(package_root: Path, check_git: bool = True) -> PublishValidationResult: + _ = check_git + result = original_validate(package_root, check_git=False) + (package_root / MANIFEST_FILENAME).unlink() + return result + + monkeypatch.setattr( + "pipelex.cli.commands.pkg.publish_cmd.validate_for_publish", + validate_then_delete, + ) + + # Should not raise β€” version comes from validation result, not re-parsed file + do_pkg_publish(tag=True) + + # Verify tag was created with the correct version + result = subprocess.run( + ["git", "tag", "-l", "v0.2.0"], # noqa: S607 + cwd=pkg_dir, + capture_output=True, + text=True, + check=True, + ) + assert "v0.2.0" in result.stdout + + def test_publish_with_warnings_still_succeeds(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + """minimal_package (no authors/license) -> warnings but no exit.""" + src_dir = PACKAGES_DATA_DIR / "minimal_package" + pkg_dir = tmp_path / "minimal_package" + shutil.copytree(src_dir, pkg_dir) + + monkeypatch.setattr( + "pipelex.cli.commands.pkg.publish_cmd.validate_for_publish", + _validate_no_git, + ) + monkeypatch.chdir(pkg_dir) + + # Should not raise β€” warnings don't block + do_pkg_publish() diff --git a/tests/unit/pipelex/cli/test_pkg_search.py b/tests/unit/pipelex/cli/test_pkg_search.py new file mode 100644 index 000000000..02e9069dc --- /dev/null +++ b/tests/unit/pipelex/cli/test_pkg_search.py @@ -0,0 +1,163 @@ +import shutil +from io import StringIO +from pathlib import Path + +import pytest +from click.exceptions import Exit +from rich.console import Console + +from pipelex.cli.commands.pkg.search_cmd import do_pkg_search +from pipelex.core.packages.index.models import PackageIndex +from tests.unit.pipelex.core.packages.graph.test_data import make_test_package_index + + +def _mock_build_index(_path: Path) -> PackageIndex: + return make_test_package_index() + + +PACKAGES_DATA_DIR = Path(__file__).resolve().parent.parent.parent.parent / "data" / "packages" + + +class TestPkgSearch: + """Tests for pipelex pkg search command logic.""" + + def test_search_finds_concept(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + """Search for a known concept code finds it without error.""" + src_dir = PACKAGES_DATA_DIR / "legal_tools" + shutil.copytree(src_dir, tmp_path / "legal_tools") + 
+def _mock_build_index(_path: Path) -> PackageIndex:
+    return make_test_package_index()
+
+
+PACKAGES_DATA_DIR = Path(__file__).resolve().parent.parent.parent.parent / "data" / "packages"
+
+
+class TestPkgSearch:
+    """Tests for pipelex pkg search command logic."""
+
+    def test_search_finds_concept(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
+        """Search for a known concept code finds it without error."""
+        src_dir = PACKAGES_DATA_DIR / "legal_tools"
+        shutil.copytree(src_dir, tmp_path / "legal_tools")
+        monkeypatch.chdir(tmp_path / "legal_tools")
+
+        do_pkg_search(query="ContractClause")
+
+    def test_search_finds_pipe(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
+        """Search for a known pipe code finds it without error."""
+        src_dir = PACKAGES_DATA_DIR / "legal_tools"
+        shutil.copytree(src_dir, tmp_path / "legal_tools")
+        monkeypatch.chdir(tmp_path / "legal_tools")
+
+        do_pkg_search(query="extract_clause")
+
+    def test_search_no_results(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
+        """Search for nonexistent term returns no results without exit."""
+        src_dir = PACKAGES_DATA_DIR / "legal_tools"
+        shutil.copytree(src_dir, tmp_path / "legal_tools")
+        monkeypatch.chdir(tmp_path / "legal_tools")
+
+        # Should not raise — just prints "no results" message
+        do_pkg_search(query="zzz_nonexistent_zzz")
+
+    def test_search_domain_filter(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
+        """Search with --domain restricts results to that domain."""
+        src_dir = PACKAGES_DATA_DIR / "legal_tools"
+        shutil.copytree(src_dir, tmp_path / "legal_tools")
+        monkeypatch.chdir(tmp_path / "legal_tools")
+
+        # Searching for "score" in domain "pkg_test_legal.contracts" should find nothing
+        # since scoring concepts are in a different domain
+        do_pkg_search(query="score", domain="pkg_test_legal.contracts")
+
+    def test_search_both_concept_and_pipe_flags(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
+        """When both --concept and --pipe flags are set, treat as 'show both'."""
+        src_dir = PACKAGES_DATA_DIR / "legal_tools"
+        shutil.copytree(src_dir, tmp_path / "legal_tools")
+        monkeypatch.chdir(tmp_path / "legal_tools")
+
+        # Should not raise or show "no results" — both concepts and pipes are searched
+        do_pkg_search(query="ContractClause", concept_only=True, pipe_only=True)
+
+    def test_search_empty_project_exits(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
+        """No packages in empty dir -> exit 1."""
+        monkeypatch.chdir(tmp_path)
+
+        with pytest.raises(Exit):
+            do_pkg_search(query="anything")
+
+    # --- Type-compatible search tests (Phase 7A) ---
+
+    def test_search_accepts_finds_pipes(self, monkeypatch: pytest.MonkeyPatch) -> None:
+        """accepts='Text' resolves to native.Text and finds pipes that accept it."""
+        monkeypatch.setattr(
+            "pipelex.cli.commands.pkg.search_cmd.build_index_from_project",
+            _mock_build_index,
+        )
+        # All test pipes accept Text as input, so this should not raise
+        do_pkg_search(accepts="Text")
+
+    def test_search_produces_finds_pipes(self, monkeypatch: pytest.MonkeyPatch) -> None:
+        """produces='PkgTestContractClause' resolves uniquely and finds extract_clause."""
+        monkeypatch.setattr(
+            "pipelex.cli.commands.pkg.search_cmd.build_index_from_project",
+            _mock_build_index,
+        )
+        do_pkg_search(produces="PkgTestContractClause")
+
+    def test_search_accepts_ambiguous_concept(self, monkeypatch: pytest.MonkeyPatch) -> None:
+        """accepts='Score' matches multiple concepts across packages -> Exit raised."""
+        monkeypatch.setattr(
+            "pipelex.cli.commands.pkg.search_cmd.build_index_from_project",
+            _mock_build_index,
+        )
+        with pytest.raises(Exit):
+            do_pkg_search(accepts="Score")
+
+    def test_search_accepts_no_concept_found(self, monkeypatch: pytest.MonkeyPatch) -> None:
+        """accepts='zzz_nonexistent_zzz' matches nothing -> prints message, no raise."""
+        monkeypatch.setattr(
+            "pipelex.cli.commands.pkg.search_cmd.build_index_from_project",
+            _mock_build_index,
+        )
+        do_pkg_search(accepts="zzz_nonexistent_zzz")
+
+    def test_search_produces_no_pipes(self, monkeypatch: pytest.MonkeyPatch) -> None:
+        """produces='Dynamic' resolves to native.Dynamic but no pipe produces it."""
+        monkeypatch.setattr(
+            "pipelex.cli.commands.pkg.search_cmd.build_index_from_project",
+            _mock_build_index,
+        )
+        do_pkg_search(produces="Dynamic")
+
+    def test_search_no_query_or_type_flag_exits(self) -> None:
+        """No query, no accepts, no produces -> Exit raised."""
+        with pytest.raises(Exit):
+            do_pkg_search()
+
+    def test_search_accepts_exact_match_preferred(self, monkeypatch: pytest.MonkeyPatch) -> None:
+        """accepts='Text' resolves to exactly native.Text (not TextAndImages) -> no Exit."""
+        monkeypatch.setattr(
+            "pipelex.cli.commands.pkg.search_cmd.build_index_from_project",
+            _mock_build_index,
+        )
+        # "Text" is a substring of "TextAndImages", but exact match should prevent ambiguity
+        do_pkg_search(accepts="Text")
+
+    def test_search_accepts_with_domain_filter(self, monkeypatch: pytest.MonkeyPatch) -> None:
+        """accepts='Text' with domain='pkg_test_legal' returns only legal-domain pipes."""
+        monkeypatch.setattr(
+            "pipelex.cli.commands.pkg.search_cmd.build_index_from_project",
+            _mock_build_index,
+        )
+        # Use a wide console to avoid Rich truncation
+        string_io = StringIO()
+        wide_console = Console(file=string_io, width=300)
+        monkeypatch.setattr(
+            "pipelex.cli.commands.pkg.search_cmd.get_console",
+            lambda: wide_console,
+        )
+        do_pkg_search(accepts="Text", domain="pkg_test_legal")
+        captured = string_io.getvalue()
+        # The legal pipe that accepts Text should appear
+        assert "pkg_test_extract_clause" in captured
+        # Pipes from other domains should be excluded
+        assert "pkg_test_compute_score" not in captured
+        assert "pkg_test_refine_score" not in captured
+        assert "pkg_test_compute_analytics" not in captured
+
+    def test_search_produces_with_domain_filter(self, monkeypatch: pytest.MonkeyPatch) -> None:
+        """produces='Text' filtered to a non-matching domain yields no results."""
+        monkeypatch.setattr(
+            "pipelex.cli.commands.pkg.search_cmd.build_index_from_project",
+            _mock_build_index,
+        )
+        # pkg_test_analyze_clause produces Text and is in pkg_test_legal domain.
+        # Filtering to pkg_test_scoring_dep should exclude it, yielding no results.
+        do_pkg_search(produces="Text", domain="pkg_test_scoring_dep")
diff --git a/tests/unit/pipelex/cli/test_pkg_update.py b/tests/unit/pipelex/cli/test_pkg_update.py
new file mode 100644
index 000000000..f6c0fec14
--- /dev/null
+++ b/tests/unit/pipelex/cli/test_pkg_update.py
@@ -0,0 +1,33 @@
+import shutil
+from pathlib import Path
+
+import pytest
+from click.exceptions import Exit
+
+from pipelex.cli.commands.pkg.update_cmd import do_pkg_update
+from pipelex.core.packages.lock_file import LOCK_FILENAME
+
+PACKAGES_DATA_DIR = Path(__file__).resolve().parent.parent.parent.parent / "data" / "packages"
+
+
+class TestPkgUpdate:
+    """Tests for pipelex pkg update command logic."""
+
+    def test_update_no_manifest_exits(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
+        """No METHODS.toml -> Exit."""
+        monkeypatch.chdir(tmp_path)
+
+        with pytest.raises(Exit):
+            do_pkg_update()
+
+    def test_update_creates_lock_fresh(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
+        """Creates methods.lock when none exists."""
+        src = PACKAGES_DATA_DIR / "minimal_package"
+        shutil.copytree(src, tmp_path / "pkg")
+        pkg_dir = tmp_path / "pkg"
+        monkeypatch.chdir(pkg_dir)
+
+        do_pkg_update()
+
+        lock_path = pkg_dir / LOCK_FILENAME
+        assert lock_path.exists()
diff --git a/tests/unit/pipelex/client/test_api_serialization.py b/tests/unit/pipelex/client/test_api_serialization.py
deleted file mode 100644
index 925f9ab4b..000000000
--- a/tests/unit/pipelex/client/test_api_serialization.py
+++ /dev/null
@@ -1,165 +0,0 @@
-import json
-from datetime import datetime
-from decimal import Decimal
-from enum import Enum
-from typing import Any
-
-import pytest
-from pydantic import BaseModel
-
-from pipelex.client.api_serializer import ApiSerializer
-from pipelex.core.concepts.concept_factory import ConceptFactory
-from pipelex.core.concepts.native.concept_native import NativeConceptCode
-from pipelex.core.memory.working_memory import WorkingMemory
-from pipelex.core.memory.working_memory_factory import WorkingMemoryFactory
-from pipelex.core.stuffs.number_content import NumberContent
-from pipelex.core.stuffs.structured_content import StructuredContent
-from pipelex.core.stuffs.stuff_factory import StuffFactory
-from pipelex.core.stuffs.text_content import TextContent
-
-
-class DateTimeEvent(StructuredContent):
-    """Test model for datetime content."""
-
-    event_name: str
-    start_time: datetime
-    end_time: datetime
-    created_at: datetime
-
-
-# Test models for complex scenarios
-class Priority(Enum):
-    LOW = "low"
-    MEDIUM = "medium"
-    HIGH = "high"
-
-
-class TaskStatus(BaseModel):
-    is_complete: bool
-    completion_date: datetime | None = None
-    notes: list[str] = []
-
-
-class ComplexTask(BaseModel):
-    task_id: str
-    title: str
-    priority: Priority
-    status: TaskStatus
-    due_dates: list[datetime]
-    metadata: dict[str, Any]
-    score: Decimal | None = None
-
-
-class Project(BaseModel):
-    name: str
-    created_at: datetime
-    tasks: list[ComplexTask]
-    settings: dict[str, Any]
-
-
-class TestApiSerialization:
-    @pytest.fixture
-    def datetime_content_memory(self) -> WorkingMemory:
-        datetime_event = DateTimeEvent(
-            event_name="Project Kickoff Meeting",
-            start_time=datetime(2024, 1, 15, 10, 0, 0, tzinfo=None),
-            end_time=datetime(2024, 1, 15, 11, 30, 0, tzinfo=None),
-            created_at=datetime(2024, 1, 1, 9, 0, 0, tzinfo=None),
-        )
-
-        stuff = StuffFactory.make_stuff(
-            concept=ConceptFactory.make(
-                concept_code="DateTimeEvent",
-                domain_code="event",
-                description="event.DateTimeEvent",
structure_class_name="DateTimeEvent", - ), - name="project_meeting", - content=datetime_event, - ) - return WorkingMemoryFactory.make_from_single_stuff(stuff=stuff) - - @pytest.fixture - def text_content_memory(self) -> WorkingMemory: - stuff = StuffFactory.make_stuff( - concept=ConceptFactory.make_native_concept(native_concept_code=NativeConceptCode.TEXT), - name="sample_text", - content=TextContent(text="Sample text content"), - ) - return WorkingMemoryFactory.make_from_single_stuff(stuff=stuff) - - @pytest.fixture - def number_content_memory(self) -> WorkingMemory: - number_content = NumberContent(number=3.14159) - stuff = StuffFactory.make_stuff( - concept=ConceptFactory.make_native_concept(native_concept_code=NativeConceptCode.NUMBER), - name="pi_value", - content=number_content, - ) - return WorkingMemoryFactory.make_from_single_stuff(stuff=stuff) - - def test_serialize_working_memory_with_datetime(self, datetime_content_memory: WorkingMemory): - pipeline_inputs = ApiSerializer.serialize_working_memory_for_api(datetime_content_memory) - - # Should have one entry for the datetime content - assert len(pipeline_inputs) == 1 - assert "project_meeting" in pipeline_inputs - - # Check the dict structure - datetime_dict_stuff = pipeline_inputs["project_meeting"] - assert datetime_dict_stuff["concept"] == "DateTimeEvent" - - # Check content is properly serialized - content = datetime_dict_stuff["content"] - assert isinstance(content, dict) - assert "event_name" in content - assert "start_time" in content - assert "end_time" in content - assert "created_at" in content - - # Verify the event name - assert content["event_name"] == "Project Kickoff Meeting" - - # Verify datetime objects are now formatted as ISO strings - assert content["start_time"] == "2024-01-15T10:00:00" - assert content["end_time"] == "2024-01-15T11:30:00" - assert content["created_at"] == "2024-01-01T09:00:00" - - # Ensure no __module__ or __class__ fields are present - assert "__module__" not in content - assert "__class__" not in content - - def test_api_serialized_memory_is_json_serializable(self, datetime_content_memory: WorkingMemory): - pipeline_inputs = ApiSerializer.serialize_working_memory_for_api(datetime_content_memory) - - # This should NOT raise an exception now - json_string = json.dumps(pipeline_inputs) - roundtrip = json.loads(json_string) - - # Verify roundtrip works - assert roundtrip == pipeline_inputs - - # Verify datetime fields are strings - content = roundtrip["project_meeting"]["content"] - assert isinstance(content["start_time"], str) - assert isinstance(content["end_time"], str) - assert isinstance(content["created_at"], str) - - def test_serialize_text_content(self, text_content_memory: WorkingMemory): - pipeline_inputs = ApiSerializer.serialize_working_memory_for_api(text_content_memory) - - assert len(pipeline_inputs) == 1 - assert "sample_text" in pipeline_inputs - - assert pipeline_inputs["sample_text"]["concept"] == "Text" - assert pipeline_inputs["sample_text"]["content"] == {"text": "Sample text content"} - - def test_serialize_number_content(self, number_content_memory: WorkingMemory): - pipeline_inputs = ApiSerializer.serialize_working_memory_for_api(number_content_memory) - - assert len(pipeline_inputs) == 1 - assert "pi_value" in pipeline_inputs - - number_dict_stuff = pipeline_inputs["pi_value"] - assert number_dict_stuff["concept"] == "Number" - assert number_dict_stuff["content"] == {"number": 3.14159} diff --git a/tests/unit/pipelex/cogt/models/test_model_deck_references.py 
index 4a72e6298..a612ec92c 100644
--- a/tests/unit/pipelex/cogt/models/test_model_deck_references.py
+++ b/tests/unit/pipelex/cogt/models/test_model_deck_references.py
@@ -18,7 +18,7 @@
 from pipelex.cogt.models.model_deck_loader import load_model_deck_blueprint
 from pipelex.cogt.models.model_manager import ModelManager
 from pipelex.cogt.models.model_reference import ModelReference, ModelReferenceKind
-from pipelex.system.configuration.configs import ConfigPaths
+from pipelex.system.configuration.config_loader import config_manager
 from pipelex.system.pipelex_service.remote_config_fetcher import RemoteConfigFetcher
 from pipelex.tools.misc.file_utils import find_files_in_dir
 from pipelex.tools.misc.toml_utils import load_toml_from_path_if_exists
@@ -35,7 +35,7 @@ class TestModelDeckReferences:
     @pytest.fixture(scope="class")
     def model_deck_blueprint(self) -> ModelDeckBlueprint:
         """Load actual model deck blueprint from TOML files."""
-        model_deck_paths = ModelManager.get_model_deck_paths(deck_dir_path=ConfigPaths.MODEL_DECKS_DIR_PATH)
+        model_deck_paths = ModelManager.get_model_deck_paths(deck_dir_path=config_manager.model_decks_dir_path)
         return load_model_deck_blueprint(model_deck_paths=model_deck_paths)
 
     @pytest.fixture(scope="class")
@@ -91,7 +91,7 @@ def _get_local_backend_models(self) -> dict[str, ModelType]:
             Mapping of model_handle -> ModelType
         """
         known_handles: dict[str, ModelType] = {}
-        backends_dir = ConfigPaths.BACKENDS_DIR_PATH
+        backends_dir = config_manager.backends_dir_path
         toml_files = find_files_in_dir(backends_dir, pattern="*.toml", is_recursive=False)
 
         for toml_path in toml_files:
diff --git a/tests/unit/pipelex/core/bundles/test_data_pipe_sorter.py b/tests/unit/pipelex/core/bundles/test_data_pipe_sorter.py
index a0e56fc68..3dae376d5 100644
--- a/tests/unit/pipelex/core/bundles/test_data_pipe_sorter.py
+++ b/tests/unit/pipelex/core/bundles/test_data_pipe_sorter.py
@@ -53,7 +53,7 @@ class PipeSorterTestCases:
             description="D depends on B and C",
             inputs={},
             output="Text",
-            parallels=[
+            branches=[
                 SubPipeBlueprint(pipe="pipe_b", result="result_b"),
                 SubPipeBlueprint(pipe="pipe_c", result="result_c"),
             ],
diff --git a/tests/unit/pipelex/core/bundles/test_pipelex_bundle_blueprint_concept_validation.py b/tests/unit/pipelex/core/bundles/test_pipelex_bundle_blueprint_concept_validation.py
index b41eb3cbd..ff7d847a4 100644
--- a/tests/unit/pipelex/core/bundles/test_pipelex_bundle_blueprint_concept_validation.py
+++ b/tests/unit/pipelex/core/bundles/test_pipelex_bundle_blueprint_concept_validation.py
@@ -225,6 +225,62 @@ def test_valid_item_concept_ref_in_structure(self):
         )
         assert bundle.concept is not None
 
+    # ========== HIERARCHICAL DOMAIN CASES ==========
+
+    def test_valid_hierarchical_domain_concept_ref_output(self):
+        """Hierarchical domain concept ref for same domain should be valid."""
+        bundle = PipelexBundleBlueprint(
+            domain="legal.contracts",
+            description="Test bundle",
+            concept={"NonCompeteClause": "A non-compete clause concept"},
+            pipe={
+                "my_pipe": PipeLLMBlueprint(
+                    type="PipeLLM",
+                    description="Test pipe",
+                    output="legal.contracts.NonCompeteClause",
+                    prompt="Generate something",
+                ),
+            },
+        )
+        assert bundle.concept is not None
+
+    def test_valid_hierarchical_domain_external_concept_ref(self):
+        """External concept ref from a different hierarchical domain should be skipped."""
+        bundle = PipelexBundleBlueprint(
+            domain="legal.contracts",
+            description="Test bundle",
+            pipe={
+                "my_pipe": PipeLLMBlueprint(
+                    type="PipeLLM",
+                    description="Test pipe",
+                    inputs={"score": "scoring.WeightedScore"},
+                    output="Text",
+                    prompt="Process @score",
+                ),
+            },
+        )
+        assert bundle.pipe is not None
+
+    def test_invalid_hierarchical_domain_undeclared_same_domain(self):
+        """Hierarchical same-domain concept ref that is not declared should raise error."""
+        with pytest.raises(ValidationError) as exc_info:
+            PipelexBundleBlueprint(
+                domain="legal.contracts",
+                description="Test bundle",
+                pipe={
+                    "my_pipe": PipeLLMBlueprint(
+                        type="PipeLLM",
+                        description="Test pipe",
+                        output="legal.contracts.Missing",
+                        prompt="Generate something",
+                    ),
+                },
+            )
+
+        error_message = str(exc_info.value)
+        assert "Missing" in error_message
+        assert "not declared in domain" in error_message
+
     # ========== INVALID CASES ==========
 
     def test_invalid_undeclared_local_concept_in_pipe_output(self):
diff --git a/tests/unit/pipelex/core/bundles/test_pipelex_bundle_blueprint_pipe_validation.py b/tests/unit/pipelex/core/bundles/test_pipelex_bundle_blueprint_pipe_validation.py
new file mode 100644
index 000000000..65a7264b7
--- /dev/null
+++ b/tests/unit/pipelex/core/bundles/test_pipelex_bundle_blueprint_pipe_validation.py
@@ -0,0 +1,197 @@
+import pytest
+from pydantic import ValidationError
+
+from pipelex.core.bundles.pipelex_bundle_blueprint import PipelexBundleBlueprint
+from pipelex.pipe_controllers.batch.pipe_batch_blueprint import PipeBatchBlueprint
+from pipelex.pipe_controllers.condition.pipe_condition_blueprint import PipeConditionBlueprint
+from pipelex.pipe_controllers.sequence.pipe_sequence_blueprint import PipeSequenceBlueprint
+from pipelex.pipe_controllers.sub_pipe_blueprint import SubPipeBlueprint
+from pipelex.pipe_operators.llm.pipe_llm_blueprint import PipeLLMBlueprint
+
+
+class TestPipelexBundleBlueprintPipeValidation:
+    """Test validation of pipe references in PipelexBundleBlueprint."""
+
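+    # Bundle-level validation only checks refs qualified with this bundle's own
+    # domain; bare refs and external-domain refs are deferred to package level.
+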
+    # ========== VALID CASES ==========
+
+    def test_valid_bare_step_refs_to_local_pipes(self):
+        """Bare step refs (no domain prefix) should pass without validation at bundle level."""
+        bundle = PipelexBundleBlueprint(
+            domain="my_domain",
+            description="Test bundle",
+            concept={"Result": "A result concept"},
+            pipe={
+                "step1": PipeLLMBlueprint(
+                    type="PipeLLM",
+                    description="Step 1",
+                    output="Text",
+                    prompt="Hello",
+                ),
+                "step2": PipeLLMBlueprint(
+                    type="PipeLLM",
+                    description="Step 2",
+                    output="Result",
+                    prompt="Process",
+                ),
+                "my_sequence": PipeSequenceBlueprint(
+                    type="PipeSequence",
+                    description="Main sequence",
+                    output="Result",
+                    steps=[
+                        SubPipeBlueprint(pipe="step1", result="intermediate"),
+                        SubPipeBlueprint(pipe="step2", result="final"),
+                    ],
+                ),
+            },
+        )
+        assert bundle.pipe is not None
+
+    def test_valid_external_pipe_ref_in_sequence(self):
+        """External domain-qualified pipe ref should be skipped (not validated locally)."""
+        bundle = PipelexBundleBlueprint(
+            domain="orchestration",
+            description="Test bundle",
+            pipe={
+                "my_sequence": PipeSequenceBlueprint(
+                    type="PipeSequence",
+                    description="Orchestration sequence",
+                    output="Text",
+                    steps=[
+                        SubPipeBlueprint(pipe="scoring.compute_score", result="score"),
+                    ],
+                ),
+            },
+        )
+        assert bundle.pipe is not None
+
+    def test_valid_special_outcomes_not_treated_as_pipe_refs(self):
+        """Special outcomes like 'fail' and 'continue' should not be validated as pipe refs."""
+        bundle = PipelexBundleBlueprint(
+            domain="my_domain",
+            description="Test bundle",
+            concept={"Result": "A result concept"},
+            pipe={
+                "good_pipe": PipeLLMBlueprint(
+                    type="PipeLLM",
+                    description="Good pipe",
+                    output="Result",
+                    prompt="Do something",
+                ),
+                "my_condition": PipeConditionBlueprint(
+                    type="PipeCondition",
+                    description="Condition check",
+                    output="Result",
+                    expression="True",
+                    outcomes={"True": "good_pipe"},
+                    default_outcome="fail",
+                ),
+            },
+        )
+        assert bundle.pipe is not None
+
+    def test_valid_external_batch_pipe_ref(self):
+        """External domain-qualified branch_pipe_code should be skipped."""
+        bundle = PipelexBundleBlueprint(
+            domain="orchestration",
+            description="Test bundle",
+            pipe={
+                "my_batch": PipeBatchBlueprint(
+                    type="PipeBatch",
+                    description="Batch process",
+                    output="Text[]",
+                    inputs={"items": "Text[]"},
+                    branch_pipe_code="scoring.process_item",
+                    input_list_name="items",
+                    input_item_name="item",
+                ),
+            },
+        )
+        assert bundle.pipe is not None
+
+    def test_valid_bare_ref_to_nonexistent_pipe(self):
+        """Bare refs to pipes not declared locally should pass (deferred to package-level)."""
+        bundle = PipelexBundleBlueprint(
+            domain="my_domain",
+            description="Test bundle",
+            pipe={
+                "my_sequence": PipeSequenceBlueprint(
+                    type="PipeSequence",
+                    description="Main sequence",
+                    output="Text",
+                    steps=[
+                        SubPipeBlueprint(pipe="nonexistent_step", result="something"),
+                    ],
+                ),
+            },
+        )
+        assert bundle.pipe is not None
+
+    # ========== INVALID CASES ==========
+
+    def test_invalid_same_domain_pipe_ref_to_nonexistent_pipe(self):
+        """Same-domain qualified pipe ref to a non-existent pipe should raise error."""
+        with pytest.raises(ValidationError) as exc_info:
+            PipelexBundleBlueprint(
+                domain="my_domain",
+                description="Test bundle",
+                pipe={
+                    "my_sequence": PipeSequenceBlueprint(
+                        type="PipeSequence",
+                        description="Main sequence",
+                        output="Text",
+                        steps=[
+                            SubPipeBlueprint(pipe="my_domain.nonexistent_pipe", result="something"),
+                        ],
+                    ),
+                },
+            )
+
+        error_message = str(exc_info.value)
+        assert "my_domain.nonexistent_pipe" in error_message
+        assert "not declared in domain" in error_message
+
+    def test_invalid_same_domain_batch_pipe_ref(self):
+        """Same-domain qualified branch_pipe_code to non-existent pipe should raise error."""
+        with pytest.raises(ValidationError) as exc_info:
+            PipelexBundleBlueprint(
+                domain="my_domain",
+                description="Test bundle",
+                pipe={
+                    "my_batch": PipeBatchBlueprint(
+                        type="PipeBatch",
+                        description="Batch process",
+                        output="Text[]",
+                        inputs={"items": "Text[]"},
+                        branch_pipe_code="my_domain.nonexistent_branch",
+                        input_list_name="items",
+                        input_item_name="item",
+                    ),
+                },
+            )
+
+        error_message = str(exc_info.value)
+        assert "my_domain.nonexistent_branch" in error_message
+
+    def test_invalid_same_domain_condition_outcome_ref(self):
+        """Same-domain qualified outcome pipe ref to non-existent pipe should raise error."""
+        with pytest.raises(ValidationError) as exc_info:
+            PipelexBundleBlueprint(
+                domain="my_domain",
+                description="Test bundle",
+                pipe={
+                    "my_condition": PipeConditionBlueprint(
+                        type="PipeCondition",
+                        description="Condition check",
+                        output="Text",
+                        expression="True",
+                        outcomes={"True": "my_domain.nonexistent_handler"},
+                        default_outcome="fail",
+                    ),
+                },
+            )
+
+        error_message = str(exc_info.value)
+        assert "my_domain.nonexistent_handler" in error_message
diff --git a/tests/unit/pipelex/core/concepts/helpers/test_get_structure_class_name_from_blueprint.py b/tests/unit/pipelex/core/concepts/helpers/test_get_structure_class_name_from_blueprint.py
index 0355063da..04b1e09f0 100644
--- a/tests/unit/pipelex/core/concepts/helpers/test_get_structure_class_name_from_blueprint.py
+++ b/tests/unit/pipelex/core/concepts/helpers/test_get_structure_class_name_from_blueprint.py
@@ -91,13 +91,13 @@ def test_invalid_concept_ref_or_code_raises_error(self):
                 concept_ref_or_code="invalid_lowercase_code",
             )
 
-    def test_invalid_nested_domain_raises_error(self):
-        """Nested domain format (more than one dot) raises ValueError."""
-        with pytest.raises(ValueError, match="Invalid concept_ref_or_code"):
-            get_structure_class_name_from_blueprint(
-                blueprint_or_string_description="A description",
-                concept_ref_or_code="domain.subdomain.ConceptName",
-            )
+    def test_hierarchical_domain_extracts_concept_code(self):
+        """Hierarchical domain format (multiple dots) extracts the concept code correctly."""
+        result = get_structure_class_name_from_blueprint(
+            blueprint_or_string_description="A description",
+            concept_ref_or_code="domain.subdomain.ConceptName",
+        )
+        assert result == "ConceptName"
 
     def test_empty_string_raises_error(self):
         """Empty string raises ValueError."""
diff --git a/tests/unit/pipelex/core/concepts/structure_generation/test_structure_generator.py b/tests/unit/pipelex/core/concepts/structure_generation/test_structure_generator.py
index f2ce7b608..180c2082c 100644
--- a/tests/unit/pipelex/core/concepts/structure_generation/test_structure_generator.py
+++ b/tests/unit/pipelex/core/concepts/structure_generation/test_structure_generator.py
@@ -29,7 +29,7 @@ def test_simple_structure_generation(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -85,7 +85,7 @@ def test_complex_types_generation(self):
 
     If you want to customize this structure:
    1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -127,7 +127,7 @@ def test_choices_generation(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -181,7 +181,7 @@ def test_typed_choices_generation(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -217,7 +217,7 @@ def test_empty_structure(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -253,7 +253,7 @@ def test_concept_get_structure_method(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -290,7 +290,7 @@ def test_generate_from_blueprint_dict_function(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -342,7 +342,7 @@ def test_all_field_types(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -384,7 +384,7 @@ def test_required_vs_optional_fields(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -438,7 +438,7 @@ def test_default_values(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -493,7 +493,7 @@ def test_nested_list_types(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -551,7 +551,7 @@ def test_nested_dict_types(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -615,7 +615,7 @@ def test_mixed_complexity_structure(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -665,7 +665,7 @@ def test_mixed_structure_blueprint_normalization(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -711,7 +711,7 @@ def test_code_validation_success(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -788,7 +788,7 @@ def test_inheritance_from_text_content(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -850,7 +850,7 @@ def test_inheritance_from_image_content(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -904,7 +904,7 @@ def test_inheritance_from_number_content(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -957,7 +957,7 @@ def test_inheritance_from_json_content(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -1004,7 +1004,7 @@ def test_inheritance_with_empty_structure(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -1060,7 +1060,7 @@ def test_inheritance_from_document_content(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
diff --git a/tests/unit/pipelex/core/concepts/structure_generation/test_structure_generator_concept_refs.py b/tests/unit/pipelex/core/concepts/structure_generation/test_structure_generator_concept_refs.py
index a8c2c26cb..fbbef723d 100644
--- a/tests/unit/pipelex/core/concepts/structure_generation/test_structure_generator_concept_refs.py
+++ b/tests/unit/pipelex/core/concepts/structure_generation/test_structure_generator_concept_refs.py
@@ -14,7 +14,7 @@
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
diff --git a/tests/unit/pipelex/core/concepts/structure_generation/test_structure_generator_escaping.py b/tests/unit/pipelex/core/concepts/structure_generation/test_structure_generator_escaping.py
index b0d1565a3..b7ca21a7e 100644
--- a/tests/unit/pipelex/core/concepts/structure_generation/test_structure_generator_escaping.py
+++ b/tests/unit/pipelex/core/concepts/structure_generation/test_structure_generator_escaping.py
@@ -39,7 +39,7 @@ def test_escape_double_quotes_in_description(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -84,7 +84,7 @@ def test_escape_single_quotes_in_description(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -129,7 +129,7 @@ def test_escape_mixed_quotes_in_description(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -174,7 +174,7 @@ def test_escape_backslashes_in_description(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -219,7 +219,7 @@ def test_escape_newlines_in_description(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -264,7 +264,7 @@ def test_escape_tabs_in_description(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -310,7 +310,7 @@ def test_escape_multiple_special_characters_combined(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -356,7 +356,7 @@ def test_escape_default_value_with_quotes(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -404,7 +404,7 @@ def test_escape_default_value_with_backslashes(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -449,7 +449,7 @@ def test_empty_string_description(self):
 
     If you want to customize this structure:
    1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -500,7 +500,7 @@ def test_very_long_description_with_quotes(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -545,7 +545,7 @@ def test_unicode_characters_in_description(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -590,7 +590,7 @@ def test_carriage_return_in_description(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
@@ -649,7 +649,7 @@ def test_multiple_fields_with_various_escaping_needs(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
 
diff --git a/tests/unit/pipelex/core/concepts/test_concept.py b/tests/unit/pipelex/core/concepts/test_concept.py
index cd1699b87..07a6533b5 100644
--- a/tests/unit/pipelex/core/concepts/test_concept.py
+++ b/tests/unit/pipelex/core/concepts/test_concept.py
@@ -1,5 +1,6 @@
 import pytest
 from kajson.kajson_manager import KajsonManager
+from pydantic import ValidationError
 
 from pipelex.cogt.image.image_size import ImageSize
 from pipelex.core.concepts.concept import Concept
@@ -205,12 +206,9 @@ def test_validate_concept_ref(self):
         with pytest.raises(ConceptStringError):
             validate_concept_ref(f"snake_case_domaiN.{valid_concept_code}")
 
-        # Multiple dots
-        with pytest.raises(ConceptStringError):
-            validate_concept_ref(f"domain.sub.{valid_concept_code}")
-
-        with pytest.raises(ConceptStringError):
-            validate_concept_ref(f"a.b.c.{valid_concept_code}")
+        # Hierarchical domains (multiple dots) - now valid
+        validate_concept_ref(f"domain.sub.{valid_concept_code}")
+        validate_concept_ref(f"a.b.c.{valid_concept_code}")
 
         # Invalid domain (not snake_case)
         with pytest.raises(ConceptStringError):
@@ -238,6 +236,41 @@ def test_validate_concept_ref(self):
         with pytest.raises(ConceptStringError):
             validate_concept_ref(f"{valid_domain}.text-name")
 
+    @pytest.mark.parametrize(
+        "domain_code",
+        [
+            "scoring_lib->scoring",
+            "my_lib->legal.contracts",
+        ],
+    )
+    def test_concept_with_cross_package_domain_code(self, domain_code: str):
+        """Concept construction with a cross-package domain code should pass validation."""
+        concept = Concept(
+            code="WeightedScore",
+            domain_code=domain_code,
+            description="Test concept",
+            structure_class_name="TextContent",
+        )
+        assert concept.domain_code == domain_code
+
+    @pytest.mark.parametrize(
+        "domain_code",
+        [
+            "lib->",
+            "lib->Legal",
+            "lib->.scoring",
+        ],
+    )
+    def test_concept_with_invalid_cross_package_domain_code(self, domain_code: str):
+        """Concept construction with an invalid cross-package domain code should raise."""
+        with pytest.raises(ValidationError):
+            Concept(
+                code="WeightedScore",
+                domain_code=domain_code,
+                description="Test concept",
+                structure_class_name="TextContent",
+            )
+
     def test_are_concept_compatible(self):
         concept1 = ConceptFactory.make_from_blueprint(
             concept_code="Code1",
diff --git a/tests/unit/pipelex/core/concepts/test_concept_cross_package_refines.py b/tests/unit/pipelex/core/concepts/test_concept_cross_package_refines.py
new file mode 100644
index 000000000..80656ac3e
--- /dev/null
+++ b/tests/unit/pipelex/core/concepts/test_concept_cross_package_refines.py
@@ -0,0 +1,143 @@
+from pipelex.core.concepts.concept import Concept
+
+
+def _make_concept(code: str, domain_code: str, refines: str | None = None) -> Concept:
+    """Create a minimal Concept for testing."""
+    return Concept(
+        code=code,
+        domain_code=domain_code,
+        description="Test concept",
+        structure_class_name="TextContent",
+        refines=refines,
+    )
+
+
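+# The concept_resolver callbacks in these tests stand in for the package graph:
+# they map a cross-package ref like "dep->domain.Concept" to a known Concept.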
file mode 100644 index 000000000..80656ac3e --- /dev/null +++ b/tests/unit/pipelex/core/concepts/test_concept_cross_package_refines.py @@ -0,0 +1,141 @@ +from pipelex.core.concepts.concept import Concept + + +def _make_concept(code: str, domain_code: str, refines: str | None = None) -> Concept: + """Create a minimal Concept for testing.""" + return Concept( + code=code, + domain_code=domain_code, + description="Test concept", + structure_class_name="TextContent", + refines=refines, + ) + + +class TestConceptCrossPackageRefines: + """Tests for cross-package refinement compatibility in Concept.are_concept_compatible().""" + + def test_refines_cross_package_with_resolver_compatible(self): + """Concept refining cross-package concept is compatible when resolver resolves to target.""" + refining = _make_concept(code="RefinedScore", domain_code="my_domain", refines="scoring_dep->scoring.WeightedScore") + target = _make_concept(code="WeightedScore", domain_code="scoring") + + def resolver(concept_ref: str) -> Concept | None: + if concept_ref == "scoring_dep->scoring.WeightedScore": + return target + return None + + assert Concept.are_concept_compatible(concept_1=refining, concept_2=target, concept_resolver=resolver) is True + + def test_refines_cross_package_without_resolver_not_compatible(self): + """Cross-package refines without a resolver is not compatible via refines check.""" + refining = _make_concept(code="RefinedScore", domain_code="my_domain", refines="scoring_dep->scoring.WeightedScore") + target = _make_concept(code="WeightedScore", domain_code="scoring") + + # Without resolver, the cross-package refines string won't match the target concept_ref + # They might still be compatible via structure_class_name (both TextContent) + # but the refines-based check specifically won't match + result = Concept.are_concept_compatible(concept_1=refining, concept_2=target) + # Compatible due to same structure_class_name, not due to refines resolution + assert result is True + + def test_refines_cross_package_different_structure_without_resolver(self): + """Cross-package refines with different structures, without resolver.""" + refining = Concept( + code="RefinedScore", + domain_code="my_domain", + description="Refined", + structure_class_name="RefinedScoreContent", + refines="scoring_dep->scoring.WeightedScore", + ) + target = Concept( + code="WeightedScore", + domain_code="scoring", + description="Target", + structure_class_name="WeightedScoreContent", + ) + + # Without resolver, and different structure_class_name, not compatible via refines + result = Concept.are_concept_compatible(concept_1=refining, concept_2=target) + assert result is False + + def test_refines_cross_package_different_structure_with_resolver(self): + """Cross-package refines with different structures, but resolver resolves correctly.""" + refining = Concept( + code="RefinedScore", + domain_code="my_domain", + description="Refined", + structure_class_name="RefinedScoreContent", + refines="scoring_dep->scoring.WeightedScore", + ) + target = Concept( + code="WeightedScore", + domain_code="scoring", + description="Target", + structure_class_name="WeightedScoreContent", + ) + + def resolver(concept_ref: str) -> Concept | None: + if concept_ref == "scoring_dep->scoring.WeightedScore": + return target + return None + + result = Concept.are_concept_compatible(concept_1=refining, concept_2=target, concept_resolver=resolver) + assert result is True + + def test_both_refine_same_cross_package_concept_siblings(self): + """Two concepts that 
both refine the same cross-package concept are siblings.""" + base = _make_concept(code="BaseScore", domain_code="scoring") + + sibling_a = Concept( + code="ScoreA", + domain_code="my_domain", + description="Sibling A", + structure_class_name="ScoreAContent", + refines="scoring_dep->scoring.BaseScore", + ) + sibling_b = Concept( + code="ScoreB", + domain_code="my_domain", + description="Sibling B", + structure_class_name="ScoreBContent", + refines="scoring_dep->scoring.BaseScore", + ) + + def resolver(concept_ref: str) -> Concept | None: + if concept_ref == "scoring_dep->scoring.BaseScore": + return base + return None + + result = Concept.are_concept_compatible(concept_1=sibling_a, concept_2=sibling_b, concept_resolver=resolver) + assert result is True + + def test_resolver_returns_none_not_compatible(self): + """When resolver returns None for a cross-package refines, not compatible via refines.""" + refining = Concept( + code="RefinedScore", + domain_code="my_domain", + description="Refined", + structure_class_name="RefinedScoreContent", + refines="unknown_dep->scoring.Missing", + ) + target = Concept( + code="WeightedScore", + domain_code="scoring", + description="Target", + structure_class_name="WeightedScoreContent", + ) + + def resolver(_concept_ref: str) -> Concept | None: + return None + + result = Concept.are_concept_compatible(concept_1=refining, concept_2=target, concept_resolver=resolver) + assert result is False + + def test_local_refines_unaffected(self): + """Local (non-cross-package) refines still works without resolver.""" + base = _make_concept(code="BaseScore", domain_code="scoring") + refining = _make_concept(code="DetailedScore", domain_code="scoring", refines="scoring.BaseScore") + + result = Concept.are_concept_compatible(concept_1=refining, concept_2=base) + assert result is True diff --git a/tests/unit/pipelex/core/concepts/test_validation.py b/tests/unit/pipelex/core/concepts/test_validation.py index c631746d3..ae0ebb669 100644 --- a/tests/unit/pipelex/core/concepts/test_validation.py +++ b/tests/unit/pipelex/core/concepts/test_validation.py @@ -39,6 +39,11 @@ def test_is_concept_code_valid(self, concept_code: str, expected: bool): ("crm.Customer", True), ("my_app.Entity", True), ("domain.A", True), + # Hierarchical domains + ("legal.contracts.NonCompeteClause", True), + ("legal.contracts.shareholder.Agreement", True), + ("a.b.c.D", True), + # Invalid ("native.text", False), ("NATIVE.Text", False), ("my-app.Entity", False), @@ -63,12 +68,13 @@ def test_is_concept_ref_valid(self, concept_ref: str, expected: bool): ("myapp.BaseEntity", True), ("crm.Customer", True), ("my_app.Entity", True), + # Valid - hierarchical domain refs (now supported) + ("org.dept.team.Entity", True), + ("a.b.c.D", True), + ("legal.contracts.NonCompeteClause", True), # Invalid - lowercase bare code ("somecustomconcept", False), ("text", False), - # Invalid - deeply nested domain - ("org.dept.team.Entity", False), - ("a.b.c.D", False), # Invalid - hyphenated domain ("my-app.Entity", False), # Invalid - empty string diff --git a/tests/unit/pipelex/core/domains/test_domain_validation.py b/tests/unit/pipelex/core/domains/test_domain_validation.py new file mode 100644 index 000000000..80282f849 --- /dev/null +++ b/tests/unit/pipelex/core/domains/test_domain_validation.py @@ -0,0 +1,50 @@ +import pytest + +from pipelex.core.domains.validation import is_domain_code_valid + + +class TestDomainValidation: + """Test domain code validation including hierarchical dotted paths.""" + + 
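# A minimal sketch of the rule these cases assume (hypothetical; the actual check + # lives in pipelex.core.domains.validation.is_domain_code_valid): + #   SEGMENT = r"[a-z][a-z0-9_]*" + #   PATTERN = rf"^({SEGMENT}->)?{SEGMENT}(\.{SEGMENT})*$" + # i.e. an optional single-segment dependency alias before "->", followed by one or + # more dot-separated snake_case segments. +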
@pytest.mark.parametrize( + ("code", "expected"), + [ + # Single-segment domains + ("legal", True), + ("my_app", True), + ("native", True), + ("a", True), + # Hierarchical domains + ("legal.contracts", True), + ("legal.contracts.shareholder", True), + ("a.b.c", True), + ("my_app.sub_domain", True), + # Cross-package domain codes + ("scoring_lib->scoring", True), + ("my_lib->legal.contracts", True), + ("alias->a.b.c", True), + ("lib->native", True), + # Cross-package with invalid remainder + ("lib->Legal", False), + ("lib->", False), + ("lib->legal.", False), + ("lib->.legal", False), + ("lib->legal..contracts", False), + # Invalid + ("Legal", False), + ("legal.", False), + (".legal", False), + ("legal..contracts", False), + ("legal-contracts", False), + ("", False), + ("123abc", False), + ("UPPER", False), + ("legal.Contracts", False), + ("legal.contracts.", False), + (".legal.contracts", False), + ("legal..contracts.shareholder", False), + ], + ) + def test_is_domain_code_valid(self, code: str, expected: bool): + """Test domain code validation accepts hierarchical dotted paths.""" + assert is_domain_code_valid(code=code) == expected diff --git a/tests/unit/pipelex/core/interpreter/test_interpreter.py b/tests/unit/pipelex/core/interpreter/test_interpreter.py index a351aa42d..4297537b1 100644 --- a/tests/unit/pipelex/core/interpreter/test_interpreter.py +++ b/tests/unit/pipelex/core/interpreter/test_interpreter.py @@ -7,18 +7,18 @@ class TestPipelexInterpreter: - @pytest.mark.parametrize(("test_name", "plx_content", "expected_blueprint"), InterpreterTestCases.VALID_TEST_CASES) - def test_make_pipelex_bundle_blueprint(self, test_name: str, plx_content: str, expected_blueprint: PipelexBundleBlueprint): - """Test making blueprint from various valid PLX content.""" - blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(plx_content=plx_content) + @pytest.mark.parametrize(("test_name", "mthds_content", "expected_blueprint"), InterpreterTestCases.VALID_TEST_CASES) + def test_make_pipelex_bundle_blueprint(self, test_name: str, mthds_content: str, expected_blueprint: PipelexBundleBlueprint): + """Test making blueprint from various valid MTHDS content.""" + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(mthds_content=mthds_content) pretty_print(blueprint, title=f"Blueprint {test_name}") pretty_print(expected_blueprint, title=f"Expected blueprint {test_name}") assert blueprint == expected_blueprint - @pytest.mark.parametrize(("test_name", "invalid_plx_content", "expected_exception"), InterpreterTestCases.ERROR_TEST_CASES) - def test_invalid_plx_should_raise_exception(self, test_name: str, invalid_plx_content: str, expected_exception: type[Exception]): - """Test that invalid PLX content raises appropriate exceptions.""" - log.verbose(f"Testing invalid PLX content: {test_name}") + @pytest.mark.parametrize(("test_name", "invalid_mthds_content", "expected_exception"), InterpreterTestCases.ERROR_TEST_CASES) + def test_invalid_mthds_should_raise_exception(self, test_name: str, invalid_mthds_content: str, expected_exception: type[Exception]): + """Test that invalid MTHDS content raises appropriate exceptions.""" + log.verbose(f"Testing invalid MTHDS content: {test_name}") with pytest.raises(expected_exception): - PipelexInterpreter.make_pipelex_bundle_blueprint(plx_content=invalid_plx_content) + PipelexInterpreter.make_pipelex_bundle_blueprint(mthds_content=invalid_mthds_content) diff --git a/tests/unit/pipelex/core/packages/graph/test_chain_formatter.py 
b/tests/unit/pipelex/core/packages/graph/test_chain_formatter.py new file mode 100644 index 000000000..3ee6ffb13 --- /dev/null +++ b/tests/unit/pipelex/core/packages/graph/test_chain_formatter.py @@ -0,0 +1,93 @@ +from pipelex.core.packages.graph.chain_formatter import format_chain_as_mthds_snippet +from pipelex.core.packages.graph.graph_builder import build_know_how_graph +from pipelex.core.packages.graph.models import ( + NATIVE_PACKAGE_ADDRESS, + ConceptId, + PipeNode, +) +from tests.unit.pipelex.core.packages.graph.test_data import ( + LEGAL_TOOLS_ADDRESS, + SCORING_LIB_ADDRESS, + make_test_package_index, +) + +NATIVE_TEXT_ID = ConceptId(package_address=NATIVE_PACKAGE_ADDRESS, concept_ref="native.Text") +LEGAL_CONCEPT_ID = ConceptId(package_address=LEGAL_TOOLS_ADDRESS, concept_ref="pkg_test_legal.PkgTestContractClause") + + +def _build_graph_and_resolve(pipe_keys: list[str]) -> list[PipeNode]: + """Build graph from test index and resolve pipe node_keys to PipeNodes.""" + index = make_test_package_index() + graph = build_know_how_graph(index) + pipe_nodes: list[PipeNode] = [] + for key in pipe_keys: + node = graph.get_pipe_node(key) + assert node is not None, f"Pipe node not found: {key}" + pipe_nodes.append(node) + return pipe_nodes + + +class TestChainFormatter: + """Tests for the MTHDS chain composition formatter.""" + + def test_format_single_step_chain(self) -> None: + """Single-step chain shows Step 1 with correct pipe info, no cross-package note.""" + extract_key = f"{LEGAL_TOOLS_ADDRESS}::pkg_test_extract_clause" + chain_pipes = _build_graph_and_resolve([extract_key]) + + result = format_chain_as_mthds_snippet(chain_pipes, NATIVE_TEXT_ID, LEGAL_CONCEPT_ID) + + assert "Step 1: pkg_test_extract_clause" in result + assert "Step 2" not in result + assert LEGAL_TOOLS_ADDRESS in result + assert "pkg_test_legal" in result + assert "native.Text" in result + assert "pkg_test_legal.PkgTestContractClause" in result + assert "Note:" not in result + + def test_format_two_step_same_package(self) -> None: + """Two-step same-package chain shows both steps with correct wiring, no cross-package note.""" + extract_key = f"{LEGAL_TOOLS_ADDRESS}::pkg_test_extract_clause" + analyze_key = f"{LEGAL_TOOLS_ADDRESS}::pkg_test_analyze_clause" + chain_pipes = _build_graph_and_resolve([extract_key, analyze_key]) + + result = format_chain_as_mthds_snippet(chain_pipes, NATIVE_TEXT_ID, NATIVE_TEXT_ID) + + assert "Step 1: pkg_test_extract_clause" in result + assert "Step 2: pkg_test_analyze_clause" in result + # Step 1 output should feed into step 2 input + assert "pkg_test_legal.PkgTestContractClause" in result + assert "Note:" not in result + + def test_format_cross_package_chain(self) -> None: + """Chain spanning multiple packages includes the cross-package note.""" + analyze_key = f"{LEGAL_TOOLS_ADDRESS}::pkg_test_analyze_clause" + score_key = f"{SCORING_LIB_ADDRESS}::pkg_test_compute_score" + chain_pipes = _build_graph_and_resolve([analyze_key, score_key]) + + result = format_chain_as_mthds_snippet(chain_pipes, LEGAL_CONCEPT_ID, LEGAL_CONCEPT_ID) + + assert "Note: This chain spans multiple packages" in result + + def test_format_empty_chain(self) -> None: + """Empty chain list returns empty string.""" + result = format_chain_as_mthds_snippet([], NATIVE_TEXT_ID, NATIVE_TEXT_ID) + assert result == "" + + def test_format_header_shows_concept_flow(self) -> None: + """Composition header line shows from -> intermediate -> to concept refs.""" + extract_key = f"{LEGAL_TOOLS_ADDRESS}::pkg_test_extract_clause" + 
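# Expected header shape, per the assertions below: "Composition: <from> -> <intermediate> -> <to>" +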
analyze_key = f"{LEGAL_TOOLS_ADDRESS}::pkg_test_analyze_clause" + chain_pipes = _build_graph_and_resolve([extract_key, analyze_key]) + + result = format_chain_as_mthds_snippet(chain_pipes, NATIVE_TEXT_ID, NATIVE_TEXT_ID) + + header_line = result.split("\n")[0] + assert header_line.startswith("Composition:") + assert "native.Text" in header_line + assert "pkg_test_legal.PkgTestContractClause" in header_line + # Final output should also be in the header + parts = header_line.split(" -> ") + assert parts[0] == "Composition: native.Text" + assert parts[1] == "pkg_test_legal.PkgTestContractClause" + assert parts[2] == "native.Text" diff --git a/tests/unit/pipelex/core/packages/graph/test_data.py b/tests/unit/pipelex/core/packages/graph/test_data.py new file mode 100644 index 000000000..08e5ef74e --- /dev/null +++ b/tests/unit/pipelex/core/packages/graph/test_data.py @@ -0,0 +1,499 @@ +"""Shared test data for know-how graph tests. + +Builds a test PackageIndex with 4 packages: + +| Package | Address | Concepts | Pipes (exported) | +|---------------|--------------------------------------|---------------------------|---------------------------------------------------------------| +| scoring-lib | github.com/pkg_test/scoring-lib | PkgTestWeightedScore | pkg_test_compute_score (Text -> PkgTestWeightedScore) | +| refining-app | github.com/pkg_test/refining-app | PkgTestRefinedScore | pkg_test_refine_score (Text -> PkgTestRefinedScore) | +| | | (refines scoring's WS) | | +| legal-tools | github.com/pkg_test/legal-tools | PkgTestContractClause | pkg_test_extract_clause (Text -> PkgTestContractClause) | +| | | | pkg_test_analyze_clause (PkgTestContractClause -> Text) | +| analytics-lib | github.com/pkg_test/analytics-lib | PkgTestWeightedScore | pkg_test_compute_analytics (Text -> PkgTestWeightedScore) | +| | | (same code, different pkg)| | +""" + +from pipelex.core.packages.index.models import ( + ConceptEntry, + DomainEntry, + PackageIndex, + PackageIndexEntry, + PipeSignature, +) + +SCORING_LIB_ADDRESS = "github.com/pkg_test/scoring-lib" +REFINING_APP_ADDRESS = "github.com/pkg_test/refining-app" +LEGAL_TOOLS_ADDRESS = "github.com/pkg_test/legal-tools" +ANALYTICS_LIB_ADDRESS = "github.com/pkg_test/analytics-lib" +PHANTOM_PKG_ADDRESS = "github.com/pkg_test/phantom-pkg" +QUALIFIED_REF_ADDRESS = "github.com/pkg_test/qualified-ref-pkg" +MALFORMED_REF_ADDRESS = "github.com/pkg_test/malformed-ref-pkg" +MULTI_DOMAIN_PKG_ADDRESS = "github.com/pkg_test/multi-domain-pkg" +MULTI_DOMAIN_CONSUMER_ADDRESS = "github.com/pkg_test/multi-domain-consumer" + + +def make_test_package_index() -> PackageIndex: + """Build a PackageIndex with 4 test packages for graph tests.""" + index = PackageIndex() + + # --- scoring-lib --- + scoring_lib = PackageIndexEntry( + address=SCORING_LIB_ADDRESS, + version="1.0.0", + description="Scoring library", + domains=[DomainEntry(domain_code="pkg_test_scoring_dep")], + concepts=[ + ConceptEntry( + concept_code="PkgTestWeightedScore", + domain_code="pkg_test_scoring_dep", + concept_ref="pkg_test_scoring_dep.PkgTestWeightedScore", + description="A weighted score", + structure_fields=["score_value", "weight"], + ), + ], + pipes=[ + PipeSignature( + pipe_code="pkg_test_compute_score", + pipe_type="PipeLLM", + domain_code="pkg_test_scoring_dep", + description="Compute weighted score from text", + input_specs={"text": "Text"}, + output_spec="PkgTestWeightedScore", + is_exported=True, + ), + ], + ) + index.add_entry(scoring_lib) + + # --- refining-app (depends on scoring-lib, refines its concept) 
--- + refining_app = PackageIndexEntry( + address=REFINING_APP_ADDRESS, + version="1.0.0", + description="Refining application", + domains=[DomainEntry(domain_code="pkg_test_refining")], + concepts=[ + ConceptEntry( + concept_code="PkgTestRefinedScore", + domain_code="pkg_test_refining", + concept_ref="pkg_test_refining.PkgTestRefinedScore", + description="A refined score", + refines="scoring_dep->pkg_test_scoring_dep.PkgTestWeightedScore", + ), + ], + pipes=[ + PipeSignature( + pipe_code="pkg_test_refine_score", + pipe_type="PipeLLM", + domain_code="pkg_test_refining", + description="Refine a score from text", + input_specs={"text": "Text"}, + output_spec="PkgTestRefinedScore", + is_exported=True, + ), + ], + dependencies=[SCORING_LIB_ADDRESS], + dependency_aliases={"scoring_dep": SCORING_LIB_ADDRESS}, + ) + index.add_entry(refining_app) + + # --- legal-tools --- + legal_tools = PackageIndexEntry( + address=LEGAL_TOOLS_ADDRESS, + version="1.0.0", + description="Legal document analysis tools", + domains=[DomainEntry(domain_code="pkg_test_legal")], + concepts=[ + ConceptEntry( + concept_code="PkgTestContractClause", + domain_code="pkg_test_legal", + concept_ref="pkg_test_legal.PkgTestContractClause", + description="A clause from a contract", + ), + ], + pipes=[ + PipeSignature( + pipe_code="pkg_test_extract_clause", + pipe_type="PipeLLM", + domain_code="pkg_test_legal", + description="Extract clause from text", + input_specs={"text": "Text"}, + output_spec="PkgTestContractClause", + is_exported=True, + ), + PipeSignature( + pipe_code="pkg_test_analyze_clause", + pipe_type="PipeLLM", + domain_code="pkg_test_legal", + description="Analyze a contract clause", + input_specs={"clause": "PkgTestContractClause"}, + output_spec="Text", + is_exported=True, + ), + ], + ) + index.add_entry(legal_tools) + + # --- analytics-lib (same concept code PkgTestWeightedScore but different package) --- + analytics_lib = PackageIndexEntry( + address=ANALYTICS_LIB_ADDRESS, + version="1.0.0", + description="Analytics library", + domains=[DomainEntry(domain_code="pkg_test_analytics")], + concepts=[ + ConceptEntry( + concept_code="PkgTestWeightedScore", + domain_code="pkg_test_analytics", + concept_ref="pkg_test_analytics.PkgTestWeightedScore", + description="An analytics weighted score", + ), + ], + pipes=[ + PipeSignature( + pipe_code="pkg_test_compute_analytics", + pipe_type="PipeLLM", + domain_code="pkg_test_analytics", + description="Compute analytics score from text", + input_specs={"text": "Text"}, + output_spec="PkgTestWeightedScore", + is_exported=True, + ), + ], + ) + index.add_entry(analytics_lib) + + return index + + +def make_test_package_index_with_unresolvable_concepts() -> PackageIndex: + """Build a PackageIndex containing pipes with unresolvable concept references. 
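The graph builder is expected to skip such pipes gracefully rather than raise (see test_graph_builder.py).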
+ + Creates a package with: + - One valid concept (PkgTestValidConcept) + - One pipe with a valid output concept (pkg_test_valid_pipe) + - One pipe whose output references a nonexistent concept (pkg_test_bad_output_pipe) + - One pipe whose input references a nonexistent concept (pkg_test_bad_input_pipe) + """ + index = PackageIndex() + + phantom_pkg = PackageIndexEntry( + address=PHANTOM_PKG_ADDRESS, + version="1.0.0", + description="Package with unresolvable concept references", + domains=[DomainEntry(domain_code="pkg_test_phantom")], + concepts=[ + ConceptEntry( + concept_code="PkgTestValidConcept", + domain_code="pkg_test_phantom", + concept_ref="pkg_test_phantom.PkgTestValidConcept", + description="A valid concept", + ), + ], + pipes=[ + PipeSignature( + pipe_code="pkg_test_valid_pipe", + pipe_type="PipeLLM", + domain_code="pkg_test_phantom", + description="Valid pipe with resolvable concepts", + input_specs={"text": "Text"}, + output_spec="PkgTestValidConcept", + is_exported=True, + ), + PipeSignature( + pipe_code="pkg_test_bad_output_pipe", + pipe_type="PipeLLM", + domain_code="pkg_test_phantom", + description="Pipe with unresolvable output concept", + input_specs={"text": "Text"}, + output_spec="NonExistentOutputConcept", + is_exported=True, + ), + PipeSignature( + pipe_code="pkg_test_bad_input_pipe", + pipe_type="PipeLLM", + domain_code="pkg_test_phantom", + description="Pipe with unresolvable input concept", + input_specs={"data": "NonExistentInputConcept"}, + output_spec="PkgTestValidConcept", + is_exported=True, + ), + ], + ) + index.add_entry(phantom_pkg) + + return index + + +def make_test_package_index_with_qualified_concept_specs() -> PackageIndex: + """Build a PackageIndex with pipes that use domain-qualified and cross-package concept specs. 
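Cross-package specs use the form "alias->domain.ConceptCode", where the alias must be declared in the consuming package's dependency_aliases.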
+ + Creates: + - scoring-lib with PkgTestWeightedScore in domain pkg_test_scoring_dep + - qualified-ref-pkg that: + - Has its own concept PkgTestLocalResult in domain pkg_test_qualified + - Depends on scoring-lib (alias: scoring_dep) + - Has a pipe using a domain-qualified output spec (pkg_test_qualified.PkgTestLocalResult) + - Has a pipe using a cross-package input spec (scoring_dep->pkg_test_scoring_dep.PkgTestWeightedScore) + """ + index = PackageIndex() + + # scoring-lib (dependency) + scoring_lib = PackageIndexEntry( + address=SCORING_LIB_ADDRESS, + version="1.0.0", + description="Scoring library", + domains=[DomainEntry(domain_code="pkg_test_scoring_dep")], + concepts=[ + ConceptEntry( + concept_code="PkgTestWeightedScore", + domain_code="pkg_test_scoring_dep", + concept_ref="pkg_test_scoring_dep.PkgTestWeightedScore", + description="A weighted score", + ), + ], + pipes=[ + PipeSignature( + pipe_code="pkg_test_compute_score", + pipe_type="PipeLLM", + domain_code="pkg_test_scoring_dep", + description="Compute score from text", + input_specs={"text": "Text"}, + output_spec="PkgTestWeightedScore", + is_exported=True, + ), + ], + ) + index.add_entry(scoring_lib) + + # qualified-ref-pkg (consumer with qualified concept specs) + qualified_ref_pkg = PackageIndexEntry( + address=QUALIFIED_REF_ADDRESS, + version="1.0.0", + description="Package using qualified concept references in pipes", + domains=[DomainEntry(domain_code="pkg_test_qualified")], + concepts=[ + ConceptEntry( + concept_code="PkgTestLocalResult", + domain_code="pkg_test_qualified", + concept_ref="pkg_test_qualified.PkgTestLocalResult", + description="A local result concept", + ), + ], + pipes=[ + # Pipe with domain-qualified output spec + PipeSignature( + pipe_code="pkg_test_produce_result", + pipe_type="PipeLLM", + domain_code="pkg_test_qualified", + description="Produce a local result from text", + input_specs={"text": "Text"}, + output_spec="pkg_test_qualified.PkgTestLocalResult", + is_exported=True, + ), + # Pipe with cross-package input spec + PipeSignature( + pipe_code="pkg_test_consume_score", + pipe_type="PipeLLM", + domain_code="pkg_test_qualified", + description="Consume a cross-package weighted score", + input_specs={"score": "scoring_dep->pkg_test_scoring_dep.PkgTestWeightedScore"}, + output_spec="Text", + is_exported=True, + ), + # Pipe with cross-package output spec + PipeSignature( + pipe_code="pkg_test_forward_score", + pipe_type="PipeLLM", + domain_code="pkg_test_qualified", + description="Forward a cross-package score", + input_specs={"text": "Text"}, + output_spec="scoring_dep->pkg_test_scoring_dep.PkgTestWeightedScore", + is_exported=True, + ), + ], + dependencies=[SCORING_LIB_ADDRESS], + dependency_aliases={"scoring_dep": SCORING_LIB_ADDRESS}, + ) + index.add_entry(qualified_ref_pkg) + + return index + + +def make_test_package_index_with_malformed_cross_package_ref() -> PackageIndex: + """Build a PackageIndex with a pipe whose cross-package remainder is malformed. + + Creates a package with: + - One valid concept (PkgTestValidConcept) + - One valid pipe (pkg_test_valid_pipe) that uses bare concept codes + - One pipe (pkg_test_malformed_ref_pipe) whose output spec is a cross-package ref + with a malformed remainder (e.g. 
"scoring_dep->..BadRef") that would cause + QualifiedRefError if not caught + - scoring-lib as a dependency so the alias resolves + """ + index = PackageIndex() + + # scoring-lib (dependency) + scoring_lib = PackageIndexEntry( + address=SCORING_LIB_ADDRESS, + version="1.0.0", + description="Scoring library", + domains=[DomainEntry(domain_code="pkg_test_scoring_dep")], + concepts=[ + ConceptEntry( + concept_code="PkgTestWeightedScore", + domain_code="pkg_test_scoring_dep", + concept_ref="pkg_test_scoring_dep.PkgTestWeightedScore", + description="A weighted score", + ), + ], + pipes=[], + ) + index.add_entry(scoring_lib) + + malformed_pkg = PackageIndexEntry( + address=MALFORMED_REF_ADDRESS, + version="1.0.0", + description="Package with malformed cross-package refs", + domains=[DomainEntry(domain_code="pkg_test_malformed")], + concepts=[ + ConceptEntry( + concept_code="PkgTestValidConcept", + domain_code="pkg_test_malformed", + concept_ref="pkg_test_malformed.PkgTestValidConcept", + description="A valid concept", + ), + ], + pipes=[ + # Valid pipe β€” should survive even if sibling has malformed ref + PipeSignature( + pipe_code="pkg_test_valid_pipe", + pipe_type="PipeLLM", + domain_code="pkg_test_malformed", + description="Valid pipe with resolvable concepts", + input_specs={"text": "Text"}, + output_spec="PkgTestValidConcept", + is_exported=True, + ), + # Malformed cross-package ref: remainder starts with ".." + PipeSignature( + pipe_code="pkg_test_malformed_ref_pipe", + pipe_type="PipeLLM", + domain_code="pkg_test_malformed", + description="Pipe with malformed cross-package remainder", + input_specs={"text": "Text"}, + output_spec="scoring_dep->..BadRef", + is_exported=True, + ), + ], + dependencies=[SCORING_LIB_ADDRESS], + dependency_aliases={"scoring_dep": SCORING_LIB_ADDRESS}, + ) + index.add_entry(malformed_pkg) + + return index + + +def make_test_package_index_with_multi_domain_same_concept_code() -> PackageIndex: + """Build a PackageIndex where one package has the same concept code in two domains. + + This tests that cross-package resolution picks the correct domain when + ``alias->domain.ConceptCode`` is used and the target package has that + concept code in multiple domains. 
+ + Creates: + - multi-domain-pkg with: + - Domain pkg_test_scoring: PkgTestMetric (concept_ref: pkg_test_scoring.PkgTestMetric) + - Domain pkg_test_analytics: PkgTestMetric (concept_ref: pkg_test_analytics.PkgTestMetric) + - Two pipes producing each variant + - multi-domain-consumer that: + - Depends on multi-domain-pkg (alias: multi_domain) + - Has a pipe consuming multi_domain->pkg_test_scoring.PkgTestMetric + - Has a pipe consuming multi_domain->pkg_test_analytics.PkgTestMetric + """ + index = PackageIndex() + + multi_domain_pkg = PackageIndexEntry( + address=MULTI_DOMAIN_PKG_ADDRESS, + version="1.0.0", + description="Package with same concept code in two domains", + domains=[ + DomainEntry(domain_code="pkg_test_scoring"), + DomainEntry(domain_code="pkg_test_analytics"), + ], + concepts=[ + ConceptEntry( + concept_code="PkgTestMetric", + domain_code="pkg_test_scoring", + concept_ref="pkg_test_scoring.PkgTestMetric", + description="A scoring metric", + structure_fields=["score_value"], + ), + ConceptEntry( + concept_code="PkgTestMetric", + domain_code="pkg_test_analytics", + concept_ref="pkg_test_analytics.PkgTestMetric", + description="An analytics metric", + structure_fields=["analytics_value"], + ), + ], + pipes=[ + PipeSignature( + pipe_code="pkg_test_compute_scoring_metric", + pipe_type="PipeLLM", + domain_code="pkg_test_scoring", + description="Compute scoring metric from text", + input_specs={"text": "Text"}, + output_spec="PkgTestMetric", + is_exported=True, + ), + PipeSignature( + pipe_code="pkg_test_compute_analytics_metric", + pipe_type="PipeLLM", + domain_code="pkg_test_analytics", + description="Compute analytics metric from text", + input_specs={"text": "Text"}, + output_spec="PkgTestMetric", + is_exported=True, + ), + ], + ) + index.add_entry(multi_domain_pkg) + + multi_domain_consumer = PackageIndexEntry( + address=MULTI_DOMAIN_CONSUMER_ADDRESS, + version="1.0.0", + description="Consumer that references specific domains of multi-domain-pkg", + domains=[DomainEntry(domain_code="pkg_test_consumer")], + concepts=[ + ConceptEntry( + concept_code="PkgTestConsumerResult", + domain_code="pkg_test_consumer", + concept_ref="pkg_test_consumer.PkgTestConsumerResult", + description="A consumer result", + ), + ], + pipes=[ + PipeSignature( + pipe_code="pkg_test_use_scoring_metric", + pipe_type="PipeLLM", + domain_code="pkg_test_consumer", + description="Use scoring metric from dependency", + input_specs={"metric": "multi_domain->pkg_test_scoring.PkgTestMetric"}, + output_spec="Text", + is_exported=True, + ), + PipeSignature( + pipe_code="pkg_test_use_analytics_metric", + pipe_type="PipeLLM", + domain_code="pkg_test_consumer", + description="Use analytics metric from dependency", + input_specs={"metric": "multi_domain->pkg_test_analytics.PkgTestMetric"}, + output_spec="Text", + is_exported=True, + ), + ], + dependencies=[MULTI_DOMAIN_PKG_ADDRESS], + dependency_aliases={"multi_domain": MULTI_DOMAIN_PKG_ADDRESS}, + ) + index.add_entry(multi_domain_consumer) + + return index diff --git a/tests/unit/pipelex/core/packages/graph/test_graph_builder.py b/tests/unit/pipelex/core/packages/graph/test_graph_builder.py new file mode 100644 index 000000000..4b7329fba --- /dev/null +++ b/tests/unit/pipelex/core/packages/graph/test_graph_builder.py @@ -0,0 +1,346 @@ +from pipelex.core.packages.graph.graph_builder import build_know_how_graph +from pipelex.core.packages.graph.models import ( + NATIVE_PACKAGE_ADDRESS, + ConceptId, + EdgeKind, +) +from pipelex.core.packages.index.models import 
PackageIndex +from tests.unit.pipelex.core.packages.graph.test_data import ( + ANALYTICS_LIB_ADDRESS, + LEGAL_TOOLS_ADDRESS, + MALFORMED_REF_ADDRESS, + MULTI_DOMAIN_CONSUMER_ADDRESS, + MULTI_DOMAIN_PKG_ADDRESS, + PHANTOM_PKG_ADDRESS, + QUALIFIED_REF_ADDRESS, + REFINING_APP_ADDRESS, + SCORING_LIB_ADDRESS, + make_test_package_index, + make_test_package_index_with_malformed_cross_package_ref, + make_test_package_index_with_multi_domain_same_concept_code, + make_test_package_index_with_qualified_concept_specs, + make_test_package_index_with_unresolvable_concepts, +) + + +class TestGraphBuilder: + """Tests for the know-how graph builder.""" + + def test_concept_nodes_created_for_all_packages(self) -> None: + """Builder creates concept nodes for every concept in the index.""" + index = make_test_package_index() + graph = build_know_how_graph(index) + + # 4 package concepts + 11 native concepts = 15 + package_concept_keys = [key for key in graph.concept_nodes if not key.startswith(NATIVE_PACKAGE_ADDRESS)] + assert len(package_concept_keys) == 4 + + def test_native_concept_nodes_created(self) -> None: + """Builder creates concept nodes for all native concepts.""" + index = make_test_package_index() + graph = build_know_how_graph(index) + + native_text = ConceptId(package_address=NATIVE_PACKAGE_ADDRESS, concept_ref="native.Text") + assert graph.get_concept_node(native_text) is not None + native_image = ConceptId(package_address=NATIVE_PACKAGE_ADDRESS, concept_ref="native.Image") + assert graph.get_concept_node(native_image) is not None + + def test_pipe_nodes_created(self) -> None: + """Builder creates pipe nodes for all pipes in the index.""" + index = make_test_package_index() + graph = build_know_how_graph(index) + + assert len(graph.pipe_nodes) == 5 + expected_pipes = { + f"{SCORING_LIB_ADDRESS}::pkg_test_compute_score", + f"{REFINING_APP_ADDRESS}::pkg_test_refine_score", + f"{LEGAL_TOOLS_ADDRESS}::pkg_test_extract_clause", + f"{LEGAL_TOOLS_ADDRESS}::pkg_test_analyze_clause", + f"{ANALYTICS_LIB_ADDRESS}::pkg_test_compute_analytics", + } + assert set(graph.pipe_nodes.keys()) == expected_pipes + + def test_pipe_node_output_concept_resolved(self) -> None: + """Pipe node output concept is resolved to proper ConceptId.""" + index = make_test_package_index() + graph = build_know_how_graph(index) + + extract_key = f"{LEGAL_TOOLS_ADDRESS}::pkg_test_extract_clause" + pipe_node = graph.get_pipe_node(extract_key) + assert pipe_node is not None + assert pipe_node.output_concept_id.package_address == LEGAL_TOOLS_ADDRESS + assert pipe_node.output_concept_id.concept_ref == "pkg_test_legal.PkgTestContractClause" + + def test_pipe_node_input_native_concept_resolved(self) -> None: + """Pipe input specs referencing native concepts resolve to native ConceptIds.""" + index = make_test_package_index() + graph = build_know_how_graph(index) + + extract_key = f"{LEGAL_TOOLS_ADDRESS}::pkg_test_extract_clause" + pipe_node = graph.get_pipe_node(extract_key) + assert pipe_node is not None + text_input = pipe_node.input_concept_ids["text"] + assert text_input.is_native + assert text_input.concept_ref == "native.Text" + + def test_refinement_edge_created(self) -> None: + """Builder creates refinement edge for concepts with refines.""" + index = make_test_package_index() + graph = build_know_how_graph(index) + + assert len(graph.refinement_edges) == 1 + edge = graph.refinement_edges[0] + assert edge.kind == EdgeKind.REFINEMENT + assert edge.source_concept_id is not None + assert edge.source_concept_id.package_address == 
REFINING_APP_ADDRESS + assert edge.source_concept_id.concept_ref == "pkg_test_refining.PkgTestRefinedScore" + assert edge.target_concept_id is not None + assert edge.target_concept_id.package_address == SCORING_LIB_ADDRESS + assert edge.target_concept_id.concept_ref == "pkg_test_scoring_dep.PkgTestWeightedScore" + + def test_cross_package_refines_resolved(self) -> None: + """Cross-package refines (alias->domain.Code) resolves via dependency_aliases.""" + index = make_test_package_index() + graph = build_know_how_graph(index) + + refined_id = ConceptId( + package_address=REFINING_APP_ADDRESS, + concept_ref="pkg_test_refining.PkgTestRefinedScore", + ) + refined_node = graph.get_concept_node(refined_id) + assert refined_node is not None + assert refined_node.refines is not None + assert refined_node.refines.package_address == SCORING_LIB_ADDRESS + + def test_data_flow_edges_exact_match(self) -> None: + """Data flow edges connect pipes with exactly matching output->input concepts.""" + index = make_test_package_index() + graph = build_know_how_graph(index) + + # pkg_test_extract_clause outputs PkgTestContractClause + # pkg_test_analyze_clause inputs PkgTestContractClause on "clause" + extract_key = f"{LEGAL_TOOLS_ADDRESS}::pkg_test_extract_clause" + outgoing = graph.get_outgoing_data_flow(extract_key) + analyze_targets = [edge for edge in outgoing if edge.target_pipe_key == f"{LEGAL_TOOLS_ADDRESS}::pkg_test_analyze_clause"] + assert len(analyze_targets) == 1 + assert analyze_targets[0].input_param == "clause" + + def test_data_flow_edges_native_concept(self) -> None: + """Pipes producing native Text connect to pipes consuming native Text.""" + index = make_test_package_index() + graph = build_know_how_graph(index) + + # pkg_test_analyze_clause outputs Text + analyze_key = f"{LEGAL_TOOLS_ADDRESS}::pkg_test_analyze_clause" + outgoing = graph.get_outgoing_data_flow(analyze_key) + # Should connect to all pipes that consume Text as input + target_keys = {edge.target_pipe_key for edge in outgoing} + # All pipes with "text" input expecting "Text" should be targets + assert len(target_keys) >= 1 + + def test_data_flow_via_refinement(self) -> None: + """Pipe producing a refined concept connects to pipes expecting the base concept.""" + index = make_test_package_index() + graph = build_know_how_graph(index) + + # pkg_test_refine_score produces PkgTestRefinedScore which refines PkgTestWeightedScore + # If any pipe consumed PkgTestWeightedScore from scoring-lib, the refined producer would connect + refine_key = f"{REFINING_APP_ADDRESS}::pkg_test_refine_score" + outgoing = graph.get_outgoing_data_flow(refine_key) + # Verify the refinement ancestry was properly considered + # The refined output should be connectable to consumers of the base concept + assert isinstance(outgoing, list) + + def test_no_self_loops(self) -> None: + """Data flow edges never connect a pipe to itself.""" + index = make_test_package_index() + graph = build_know_how_graph(index) + + for edge in graph.data_flow_edges: + assert edge.source_pipe_key != edge.target_pipe_key + + def test_no_cross_package_concept_collision(self) -> None: + """Same concept code in different packages creates distinct ConceptIds.""" + index = make_test_package_index() + graph = build_know_how_graph(index) + + scoring_id = ConceptId( + package_address=SCORING_LIB_ADDRESS, + concept_ref="pkg_test_scoring_dep.PkgTestWeightedScore", + ) + analytics_id = ConceptId( + package_address=ANALYTICS_LIB_ADDRESS, + concept_ref="pkg_test_analytics.PkgTestWeightedScore", + 
) + assert scoring_id != analytics_id + assert graph.get_concept_node(scoring_id) is not None + assert graph.get_concept_node(analytics_id) is not None + + # Pipes in analytics-lib resolve to analytics concept, not scoring concept + analytics_pipe_key = f"{ANALYTICS_LIB_ADDRESS}::pkg_test_compute_analytics" + analytics_pipe = graph.get_pipe_node(analytics_pipe_key) + assert analytics_pipe is not None + assert analytics_pipe.output_concept_id.package_address == ANALYTICS_LIB_ADDRESS + + def test_empty_index_produces_empty_graph_with_natives(self) -> None: + """Empty index produces a graph with only native concept nodes.""" + index = PackageIndex() + graph = build_know_how_graph(index) + + assert len(graph.pipe_nodes) == 0 + assert len(graph.data_flow_edges) == 0 + assert len(graph.refinement_edges) == 0 + # Should still have native concepts + assert len(graph.concept_nodes) > 0 + native_keys = [key for key in graph.concept_nodes if key.startswith(NATIVE_PACKAGE_ADDRESS)] + assert len(native_keys) == len(graph.concept_nodes) + + def test_pipe_with_unresolvable_output_excluded(self) -> None: + """Pipe referencing a nonexistent output concept is excluded from the graph.""" + index = make_test_package_index_with_unresolvable_concepts() + graph = build_know_how_graph(index) + + bad_output_key = f"{PHANTOM_PKG_ADDRESS}::pkg_test_bad_output_pipe" + assert graph.get_pipe_node(bad_output_key) is None + + def test_pipe_with_unresolvable_input_excluded(self) -> None: + """Pipe referencing a nonexistent input concept is excluded from the graph.""" + index = make_test_package_index_with_unresolvable_concepts() + graph = build_know_how_graph(index) + + bad_input_key = f"{PHANTOM_PKG_ADDRESS}::pkg_test_bad_input_pipe" + assert graph.get_pipe_node(bad_input_key) is None + + def test_valid_pipe_not_affected_by_unresolvable_siblings(self) -> None: + """Valid pipes in the same package are still included when siblings have unresolvable concepts.""" + index = make_test_package_index_with_unresolvable_concepts() + graph = build_know_how_graph(index) + + valid_key = f"{PHANTOM_PKG_ADDRESS}::pkg_test_valid_pipe" + pipe_node = graph.get_pipe_node(valid_key) + assert pipe_node is not None + assert pipe_node.output_concept_id.package_address == PHANTOM_PKG_ADDRESS + assert pipe_node.output_concept_id.concept_ref == "pkg_test_phantom.PkgTestValidConcept" + + def test_no_phantom_concept_nodes_created(self) -> None: + """Unresolvable concept specs do not create phantom entries in concept_nodes.""" + index = make_test_package_index_with_unresolvable_concepts() + graph = build_know_how_graph(index) + + # Only the valid concept and native concepts should exist + non_native_keys = [key for key in graph.concept_nodes if not key.startswith(NATIVE_PACKAGE_ADDRESS)] + assert len(non_native_keys) == 1 + expected_key = f"{PHANTOM_PKG_ADDRESS}::pkg_test_phantom.PkgTestValidConcept" + assert non_native_keys[0] == expected_key + + def test_domain_qualified_output_spec_resolved(self) -> None: + """Pipe with domain-qualified output spec (domain.ConceptCode) is included in graph.""" + index = make_test_package_index_with_qualified_concept_specs() + graph = build_know_how_graph(index) + + pipe_key = f"{QUALIFIED_REF_ADDRESS}::pkg_test_produce_result" + pipe_node = graph.get_pipe_node(pipe_key) + assert pipe_node is not None, f"Pipe '{pipe_key}' should be in graph but was excluded" + assert pipe_node.output_concept_id.package_address == QUALIFIED_REF_ADDRESS + assert pipe_node.output_concept_id.concept_ref == 
"pkg_test_qualified.PkgTestLocalResult" + + def test_cross_package_input_spec_resolved(self) -> None: + """Pipe with cross-package input spec (alias->domain.Code) is included in graph.""" + index = make_test_package_index_with_qualified_concept_specs() + graph = build_know_how_graph(index) + + pipe_key = f"{QUALIFIED_REF_ADDRESS}::pkg_test_consume_score" + pipe_node = graph.get_pipe_node(pipe_key) + assert pipe_node is not None, f"Pipe '{pipe_key}' should be in graph but was excluded" + # The input should resolve to the scoring-lib's concept + score_input = pipe_node.input_concept_ids["score"] + assert score_input.package_address == SCORING_LIB_ADDRESS + assert score_input.concept_ref == "pkg_test_scoring_dep.PkgTestWeightedScore" + + def test_cross_package_output_spec_resolved(self) -> None: + """Pipe with cross-package output spec (alias->domain.Code) is included in graph.""" + index = make_test_package_index_with_qualified_concept_specs() + graph = build_know_how_graph(index) + + pipe_key = f"{QUALIFIED_REF_ADDRESS}::pkg_test_forward_score" + pipe_node = graph.get_pipe_node(pipe_key) + assert pipe_node is not None, f"Pipe '{pipe_key}' should be in graph but was excluded" + assert pipe_node.output_concept_id.package_address == SCORING_LIB_ADDRESS + assert pipe_node.output_concept_id.concept_ref == "pkg_test_scoring_dep.PkgTestWeightedScore" + + def test_all_qualified_ref_pipes_included(self) -> None: + """All pipes using qualified/cross-package concept specs are included in graph.""" + index = make_test_package_index_with_qualified_concept_specs() + graph = build_know_how_graph(index) + + expected_pipes = { + f"{SCORING_LIB_ADDRESS}::pkg_test_compute_score", + f"{QUALIFIED_REF_ADDRESS}::pkg_test_produce_result", + f"{QUALIFIED_REF_ADDRESS}::pkg_test_consume_score", + f"{QUALIFIED_REF_ADDRESS}::pkg_test_forward_score", + } + assert set(graph.pipe_nodes.keys()) == expected_pipes + + def test_malformed_cross_package_ref_excluded_without_crash(self) -> None: + """Malformed cross-package remainder is excluded gracefully, not raising.""" + index = make_test_package_index_with_malformed_cross_package_ref() + # This must not raise QualifiedRefError + graph = build_know_how_graph(index) + + # The malformed pipe should be excluded + bad_key = f"{MALFORMED_REF_ADDRESS}::pkg_test_malformed_ref_pipe" + assert graph.get_pipe_node(bad_key) is None + + def test_valid_pipe_survives_malformed_sibling(self) -> None: + """Valid pipe in same package is still included when sibling has malformed ref.""" + index = make_test_package_index_with_malformed_cross_package_ref() + graph = build_know_how_graph(index) + + valid_key = f"{MALFORMED_REF_ADDRESS}::pkg_test_valid_pipe" + pipe_node = graph.get_pipe_node(valid_key) + assert pipe_node is not None + assert pipe_node.output_concept_id.package_address == MALFORMED_REF_ADDRESS + + def test_cross_package_ref_resolves_correct_domain_when_same_code_in_multiple_domains(self) -> None: + """Cross-package ref alias->domain.Code resolves to the specified domain, not another domain with same code.""" + index = make_test_package_index_with_multi_domain_same_concept_code() + graph = build_know_how_graph(index) + + # Both concept nodes should exist in the multi-domain package + scoring_concept = ConceptId( + package_address=MULTI_DOMAIN_PKG_ADDRESS, + concept_ref="pkg_test_scoring.PkgTestMetric", + ) + analytics_concept = ConceptId( + package_address=MULTI_DOMAIN_PKG_ADDRESS, + concept_ref="pkg_test_analytics.PkgTestMetric", + ) + assert graph.get_concept_node(scoring_concept) 
is not None + assert graph.get_concept_node(analytics_concept) is not None + + # The consumer pipe referencing multi_domain->pkg_test_scoring.PkgTestMetric + # must resolve to the scoring domain, NOT analytics + scoring_pipe_key = f"{MULTI_DOMAIN_CONSUMER_ADDRESS}::pkg_test_use_scoring_metric" + scoring_pipe = graph.get_pipe_node(scoring_pipe_key) + assert scoring_pipe is not None, f"Pipe '{scoring_pipe_key}' should be in graph" + scoring_input = scoring_pipe.input_concept_ids["metric"] + assert scoring_input.package_address == MULTI_DOMAIN_PKG_ADDRESS + assert scoring_input.concept_ref == "pkg_test_scoring.PkgTestMetric" + + # The consumer pipe referencing multi_domain->pkg_test_analytics.PkgTestMetric + # must resolve to the analytics domain, NOT scoring + analytics_pipe_key = f"{MULTI_DOMAIN_CONSUMER_ADDRESS}::pkg_test_use_analytics_metric" + analytics_pipe = graph.get_pipe_node(analytics_pipe_key) + assert analytics_pipe is not None, f"Pipe '{analytics_pipe_key}' should be in graph" + analytics_input = analytics_pipe.input_concept_ids["metric"] + assert analytics_input.package_address == MULTI_DOMAIN_PKG_ADDRESS + assert analytics_input.concept_ref == "pkg_test_analytics.PkgTestMetric" + + def test_multi_domain_same_code_both_concept_nodes_preserved(self) -> None: + """Same concept code in two domains within one package creates distinct nodes.""" + index = make_test_package_index_with_multi_domain_same_concept_code() + graph = build_know_how_graph(index) + + # Count non-native concept nodes from the multi-domain package + multi_domain_keys = [key for key in graph.concept_nodes if key.startswith(MULTI_DOMAIN_PKG_ADDRESS)] + assert len(multi_domain_keys) == 2, f"Expected 2 concept nodes for multi-domain-pkg, got {len(multi_domain_keys)}: {multi_domain_keys}" diff --git a/tests/unit/pipelex/core/packages/graph/test_graph_models.py b/tests/unit/pipelex/core/packages/graph/test_graph_models.py new file mode 100644 index 000000000..304f70a87 --- /dev/null +++ b/tests/unit/pipelex/core/packages/graph/test_graph_models.py @@ -0,0 +1,200 @@ +from typing import ClassVar + +import pytest +from pydantic import ValidationError + +from pipelex.core.packages.graph.models import ( + NATIVE_PACKAGE_ADDRESS, + ConceptId, + ConceptNode, + EdgeKind, + GraphEdge, + KnowHowGraph, + PipeNode, +) + + +class TestData: + NATIVE_TEXT_ID: ClassVar[ConceptId] = ConceptId( + package_address=NATIVE_PACKAGE_ADDRESS, + concept_ref="native.Text", + ) + + SCORING_CONCEPT_ID: ClassVar[ConceptId] = ConceptId( + package_address="github.com/pkg_test/scoring-lib", + concept_ref="pkg_test_scoring_dep.PkgTestWeightedScore", + ) + + LEGAL_CONCEPT_ID: ClassVar[ConceptId] = ConceptId( + package_address="github.com/pkg_test/legal-tools", + concept_ref="pkg_test_legal.PkgTestContractClause", + ) + + REFINED_CONCEPT_ID: ClassVar[ConceptId] = ConceptId( + package_address="github.com/pkg_test/refining-app", + concept_ref="pkg_test_refining.PkgTestRefinedScore", + ) + + PIPE_NODE: ClassVar[PipeNode] = PipeNode( + package_address="github.com/pkg_test/legal-tools", + pipe_code="pkg_test_extract_clause", + pipe_type="PipeLLM", + domain_code="pkg_test_legal", + description="Extract clause from text", + is_exported=True, + input_concept_ids={ + "text": ConceptId(package_address=NATIVE_PACKAGE_ADDRESS, concept_ref="native.Text"), + }, + output_concept_id=ConceptId( + package_address="github.com/pkg_test/legal-tools", + concept_ref="pkg_test_legal.PkgTestContractClause", + ), + ) + + +class TestGraphModels: + """Tests for know-how graph data 
models.""" + + def test_concept_id_node_key(self) -> None: + """ConceptId.node_key combines package_address and concept_ref.""" + assert TestData.SCORING_CONCEPT_ID.node_key == "github.com/pkg_test/scoring-lib::pkg_test_scoring_dep.PkgTestWeightedScore" + + def test_concept_id_concept_code(self) -> None: + """ConceptId.concept_code returns the last segment of concept_ref.""" + assert TestData.SCORING_CONCEPT_ID.concept_code == "PkgTestWeightedScore" + assert TestData.NATIVE_TEXT_ID.concept_code == "Text" + + def test_concept_id_is_native(self) -> None: + """ConceptId.is_native returns True for native package address.""" + assert TestData.NATIVE_TEXT_ID.is_native is True + assert TestData.SCORING_CONCEPT_ID.is_native is False + + def test_concept_id_is_frozen(self) -> None: + """ConceptId fields cannot be mutated.""" + with pytest.raises(ValidationError): + TestData.SCORING_CONCEPT_ID.package_address = "changed" # type: ignore[misc] + + def test_concept_id_equality(self) -> None: + """Two ConceptIds with the same fields are equal.""" + duplicate = ConceptId( + package_address="github.com/pkg_test/scoring-lib", + concept_ref="pkg_test_scoring_dep.PkgTestWeightedScore", + ) + assert duplicate == TestData.SCORING_CONCEPT_ID + + def test_concept_id_different_packages_not_equal(self) -> None: + """Same concept_ref in different packages are not equal.""" + analytics_score = ConceptId( + package_address="github.com/pkg_test/analytics-lib", + concept_ref="pkg_test_analytics.PkgTestWeightedScore", + ) + assert analytics_score != TestData.SCORING_CONCEPT_ID + + def test_edge_kind_values(self) -> None: + """EdgeKind enum has expected values.""" + assert EdgeKind.DATA_FLOW == "data_flow" + assert EdgeKind.REFINEMENT == "refinement" + + def test_pipe_node_key(self) -> None: + """PipeNode.node_key combines package_address and pipe_code.""" + assert TestData.PIPE_NODE.node_key == "github.com/pkg_test/legal-tools::pkg_test_extract_clause" + + def test_pipe_node_is_frozen(self) -> None: + """PipeNode fields cannot be mutated.""" + with pytest.raises(ValidationError): + TestData.PIPE_NODE.pipe_code = "changed" # type: ignore[misc] + + def test_concept_node_without_refines(self) -> None: + """ConceptNode can be created without a refines link.""" + node = ConceptNode( + concept_id=TestData.LEGAL_CONCEPT_ID, + description="A clause from a contract", + ) + assert node.refines is None + assert node.structure_fields == [] + + def test_concept_node_with_refines(self) -> None: + """ConceptNode stores a refinement link to another ConceptId.""" + node = ConceptNode( + concept_id=TestData.REFINED_CONCEPT_ID, + description="A refined score", + refines=TestData.SCORING_CONCEPT_ID, + ) + assert node.refines is not None + assert node.refines.concept_code == "PkgTestWeightedScore" + + def test_graph_edge_data_flow(self) -> None: + """GraphEdge with DATA_FLOW kind stores pipe keys and input param.""" + edge = GraphEdge( + kind=EdgeKind.DATA_FLOW, + source_pipe_key="pkg_a::pipe_x", + target_pipe_key="pkg_b::pipe_y", + input_param="text", + ) + assert edge.kind == EdgeKind.DATA_FLOW + assert edge.source_pipe_key == "pkg_a::pipe_x" + assert edge.source_concept_id is None + + def test_graph_edge_refinement(self) -> None: + """GraphEdge with REFINEMENT kind stores concept ids.""" + edge = GraphEdge( + kind=EdgeKind.REFINEMENT, + source_concept_id=TestData.REFINED_CONCEPT_ID, + target_concept_id=TestData.SCORING_CONCEPT_ID, + ) + assert edge.kind == EdgeKind.REFINEMENT + assert edge.source_concept_id == TestData.REFINED_CONCEPT_ID 
+ assert edge.source_pipe_key is None + + def test_know_how_graph_get_pipe_node(self) -> None: + """KnowHowGraph.get_pipe_node retrieves by key, returns None for unknown.""" + graph = KnowHowGraph() + graph.pipe_nodes[TestData.PIPE_NODE.node_key] = TestData.PIPE_NODE + assert graph.get_pipe_node(TestData.PIPE_NODE.node_key) is not None + assert graph.get_pipe_node("nonexistent::key") is None + + def test_know_how_graph_get_concept_node(self) -> None: + """KnowHowGraph.get_concept_node retrieves by ConceptId.""" + graph = KnowHowGraph() + node = ConceptNode( + concept_id=TestData.LEGAL_CONCEPT_ID, + description="A clause", + ) + graph.concept_nodes[TestData.LEGAL_CONCEPT_ID.node_key] = node + assert graph.get_concept_node(TestData.LEGAL_CONCEPT_ID) is not None + assert graph.get_concept_node(TestData.SCORING_CONCEPT_ID) is None + + def test_know_how_graph_outgoing_data_flow(self) -> None: + """KnowHowGraph.get_outgoing_data_flow filters edges by source pipe.""" + graph = KnowHowGraph() + edge_a = GraphEdge( + kind=EdgeKind.DATA_FLOW, + source_pipe_key="pkg::pipe_a", + target_pipe_key="pkg::pipe_b", + input_param="text", + ) + edge_b = GraphEdge( + kind=EdgeKind.DATA_FLOW, + source_pipe_key="pkg::pipe_b", + target_pipe_key="pkg::pipe_c", + input_param="data", + ) + graph.data_flow_edges.extend([edge_a, edge_b]) + outgoing = graph.get_outgoing_data_flow("pkg::pipe_a") + assert len(outgoing) == 1 + assert outgoing[0].target_pipe_key == "pkg::pipe_b" + + def test_know_how_graph_incoming_data_flow(self) -> None: + """KnowHowGraph.get_incoming_data_flow filters edges by target pipe.""" + graph = KnowHowGraph() + edge = GraphEdge( + kind=EdgeKind.DATA_FLOW, + source_pipe_key="pkg::pipe_a", + target_pipe_key="pkg::pipe_b", + input_param="text", + ) + graph.data_flow_edges.append(edge) + incoming = graph.get_incoming_data_flow("pkg::pipe_b") + assert len(incoming) == 1 + assert incoming[0].source_pipe_key == "pkg::pipe_a" + assert graph.get_incoming_data_flow("pkg::pipe_a") == [] diff --git a/tests/unit/pipelex/core/packages/graph/test_query_engine.py b/tests/unit/pipelex/core/packages/graph/test_query_engine.py new file mode 100644 index 000000000..59d770ba4 --- /dev/null +++ b/tests/unit/pipelex/core/packages/graph/test_query_engine.py @@ -0,0 +1,234 @@ +from pipelex.core.packages.graph.graph_builder import build_know_how_graph +from pipelex.core.packages.graph.models import ( + NATIVE_PACKAGE_ADDRESS, + ConceptId, +) +from pipelex.core.packages.graph.query_engine import KnowHowQueryEngine +from tests.unit.pipelex.core.packages.graph.test_data import ( + ANALYTICS_LIB_ADDRESS, + LEGAL_TOOLS_ADDRESS, + REFINING_APP_ADDRESS, + SCORING_LIB_ADDRESS, + make_test_package_index, +) + +NATIVE_TEXT_ID = ConceptId(package_address=NATIVE_PACKAGE_ADDRESS, concept_ref="native.Text") +SCORING_CONCEPT_ID = ConceptId(package_address=SCORING_LIB_ADDRESS, concept_ref="pkg_test_scoring_dep.PkgTestWeightedScore") +LEGAL_CONCEPT_ID = ConceptId(package_address=LEGAL_TOOLS_ADDRESS, concept_ref="pkg_test_legal.PkgTestContractClause") +REFINED_CONCEPT_ID = ConceptId(package_address=REFINING_APP_ADDRESS, concept_ref="pkg_test_refining.PkgTestRefinedScore") +ANALYTICS_CONCEPT_ID = ConceptId(package_address=ANALYTICS_LIB_ADDRESS, concept_ref="pkg_test_analytics.PkgTestWeightedScore") + + +class TestQueryEngine: + """Tests for the know-how query engine.""" + + def test_what_can_i_do_with_native_text(self) -> None: + """Querying with native Text finds all pipes that accept Text input.""" + index = make_test_package_index() + 
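# Setup pattern used throughout: build the index fixture, derive the know-how graph, then wrap it in the query engine +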
graph = build_know_how_graph(index) + engine = KnowHowQueryEngine(graph) + + pipes = engine.query_what_can_i_do(NATIVE_TEXT_ID) + pipe_codes = {pipe.pipe_code for pipe in pipes} + # All pipes that have a "text" input expecting Text + assert "pkg_test_compute_score" in pipe_codes + assert "pkg_test_refine_score" in pipe_codes + assert "pkg_test_extract_clause" in pipe_codes + assert "pkg_test_compute_analytics" in pipe_codes + + def test_what_can_i_do_with_specific_concept(self) -> None: + """Querying with a specific concept finds pipes accepting that concept.""" + index = make_test_package_index() + graph = build_know_how_graph(index) + engine = KnowHowQueryEngine(graph) + + pipes = engine.query_what_can_i_do(LEGAL_CONCEPT_ID) + pipe_codes = {pipe.pipe_code for pipe in pipes} + assert "pkg_test_analyze_clause" in pipe_codes + + def test_what_can_i_do_with_refined_concept(self) -> None: + """Querying with a refined concept also finds pipes expecting the base concept.""" + index = make_test_package_index() + graph = build_know_how_graph(index) + engine = KnowHowQueryEngine(graph) + + # PkgTestRefinedScore refines PkgTestWeightedScore + # If there were pipes expecting PkgTestWeightedScore, they'd be found + pipes = engine.query_what_can_i_do(REFINED_CONCEPT_ID) + # At minimum, the result should be a list (possibly empty if no pipe expects WeightedScore) + assert isinstance(pipes, list) + + def test_what_produces_text(self) -> None: + """Querying what produces native Text finds pipes with Text output.""" + index = make_test_package_index() + graph = build_know_how_graph(index) + engine = KnowHowQueryEngine(graph) + + pipes = engine.query_what_produces(NATIVE_TEXT_ID) + pipe_codes = {pipe.pipe_code for pipe in pipes} + assert "pkg_test_analyze_clause" in pipe_codes + + def test_what_produces_specific_concept(self) -> None: + """Querying what produces a specific concept finds the right pipes.""" + index = make_test_package_index() + graph = build_know_how_graph(index) + engine = KnowHowQueryEngine(graph) + + pipes = engine.query_what_produces(LEGAL_CONCEPT_ID) + pipe_codes = {pipe.pipe_code for pipe in pipes} + assert "pkg_test_extract_clause" in pipe_codes + + def test_what_produces_base_concept_includes_refinements(self) -> None: + """Querying what produces a base concept also finds pipes producing refinements.""" + index = make_test_package_index() + graph = build_know_how_graph(index) + engine = KnowHowQueryEngine(graph) + + # PkgTestRefinedScore refines PkgTestWeightedScore from scoring-lib + pipes = engine.query_what_produces(SCORING_CONCEPT_ID) + pipe_codes = {pipe.pipe_code for pipe in pipes} + assert "pkg_test_compute_score" in pipe_codes + assert "pkg_test_refine_score" in pipe_codes + + def test_check_compatibility_match(self) -> None: + """Compatible pipes return the matching input parameter names.""" + index = make_test_package_index() + graph = build_know_how_graph(index) + engine = KnowHowQueryEngine(graph) + + # extract_clause produces PkgTestContractClause, analyze_clause consumes it on "clause" + source_key = f"{LEGAL_TOOLS_ADDRESS}::pkg_test_extract_clause" + target_key = f"{LEGAL_TOOLS_ADDRESS}::pkg_test_analyze_clause" + params = engine.check_compatibility(source_key, target_key) + assert "clause" in params + + def test_check_compatibility_via_refinement(self) -> None: + """Refined output is compatible with base concept input if such exists.""" + index = make_test_package_index() + graph = build_know_how_graph(index) + engine = KnowHowQueryEngine(graph) + + # analyze_clause 
+        # analyze_clause outputs Text; all Text-input pipes are compatible
+        source_key = f"{LEGAL_TOOLS_ADDRESS}::pkg_test_analyze_clause"
+        target_key = f"{SCORING_LIB_ADDRESS}::pkg_test_compute_score"
+        params = engine.check_compatibility(source_key, target_key)
+        assert "text" in params
+
+    def test_check_compatibility_incompatible(self) -> None:
+        """Incompatible pipes return empty list."""
+        index = make_test_package_index()
+        graph = build_know_how_graph(index)
+        engine = KnowHowQueryEngine(graph)
+
+        # compute_score outputs PkgTestWeightedScore; analyze_clause expects PkgTestContractClause
+        source_key = f"{SCORING_LIB_ADDRESS}::pkg_test_compute_score"
+        target_key = f"{LEGAL_TOOLS_ADDRESS}::pkg_test_analyze_clause"
+        params = engine.check_compatibility(source_key, target_key)
+        assert params == []
+
+    def test_check_compatibility_no_cross_package_collision(self) -> None:
+        """PkgTestWeightedScore from scoring-lib != PkgTestWeightedScore from analytics-lib."""
+        index = make_test_package_index()
+        graph = build_know_how_graph(index)
+        engine = KnowHowQueryEngine(graph)
+
+        # compute_score (scoring) outputs scoring's WeightedScore
+        # compute_analytics (analytics) outputs analytics's WeightedScore
+        # They should NOT be considered the same concept, so neither feeds the other
+        scoring_key = f"{SCORING_LIB_ADDRESS}::pkg_test_compute_score"
+        analytics_key = f"{ANALYTICS_LIB_ADDRESS}::pkg_test_compute_analytics"
+        # Scoring's output should not be compatible with analytics pipe's inputs (different WeightedScore)
+        params_scoring_to_analytics = engine.check_compatibility(scoring_key, analytics_key)
+        params_analytics_to_scoring = engine.check_compatibility(analytics_key, scoring_key)
+        assert params_scoring_to_analytics == []
+        assert params_analytics_to_scoring == []
+
+    def test_resolve_refinement_chain(self) -> None:
+        """Refinement chain walks from refined to base concept."""
+        index = make_test_package_index()
+        graph = build_know_how_graph(index)
+        engine = KnowHowQueryEngine(graph)
+
+        chain = engine.resolve_refinement_chain(REFINED_CONCEPT_ID)
+        assert len(chain) == 2
+        assert chain[0] == REFINED_CONCEPT_ID
+        assert chain[1] == SCORING_CONCEPT_ID
+
+    def test_resolve_refinement_chain_no_refines(self) -> None:
+        """Concept without refines returns a single-element chain."""
+        index = make_test_package_index()
+        graph = build_know_how_graph(index)
+        engine = KnowHowQueryEngine(graph)
+
+        chain = engine.resolve_refinement_chain(LEGAL_CONCEPT_ID)
+        assert len(chain) == 1
+        assert chain[0] == LEGAL_CONCEPT_ID
+
+    def test_i_have_i_need_direct(self) -> None:
+        """Direct single-pipe chain from Text to PkgTestContractClause."""
+        index = make_test_package_index()
+        graph = build_know_how_graph(index)
+        engine = KnowHowQueryEngine(graph)
+
+        chains = engine.query_i_have_i_need(NATIVE_TEXT_ID, LEGAL_CONCEPT_ID)
+        assert len(chains) >= 1
+        # Should find extract_clause (Text -> PkgTestContractClause) as a single-step chain
+        single_step_chains = [chain for chain in chains if len(chain) == 1]
+        assert len(single_step_chains) >= 1
+        extract_key = f"{LEGAL_TOOLS_ADDRESS}::pkg_test_extract_clause"
+        found = any(extract_key in chain for chain in single_step_chains)
+        assert found
+
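The query_i_have_i_need tests in this file pin down three behaviors: chains never exceed max_depth, results come back shortest first, and an unreachable target yields an empty list. That contract matches a plain breadth-first search over the know-how graph. A minimal sketch of the idea, using simplified produces/consumes mappings keyed by pipe key; find_chains and its parameters are illustrative stand-ins, not the real KnowHowQueryEngine API (which also resolves refinement chains and cross-package concept identity):

from collections import deque


def find_chains(
    produces: dict[str, str],
    consumes: dict[str, set[str]],
    have: str,
    need: str,
    max_depth: int = 3,
) -> list[list[str]]:
    # BFS over (current concept, chain of pipe keys); FIFO order
    # naturally emits shorter chains before longer ones.
    queue: deque[tuple[str, list[str]]] = deque([(have, [])])
    chains: list[list[str]] = []
    while queue:
        concept, chain = queue.popleft()
        if len(chain) >= max_depth:
            continue  # depth bound: never grow a chain past max_depth
        for pipe_key, input_concepts in consumes.items():
            if concept in input_concepts and pipe_key not in chain:
                next_chain = chain + [pipe_key]
                if produces[pipe_key] == need:
                    chains.append(next_chain)
                queue.append((produces[pipe_key], next_chain))
    return chains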
+    def test_i_have_i_need_two_step(self) -> None:
+        """Two-step chain: Text -> PkgTestContractClause -> Text (extract then analyze)."""
+        index = make_test_package_index()
+        graph = build_know_how_graph(index)
+        engine = KnowHowQueryEngine(graph)
+
+        # Text -> ? -> Text: should find chains going through extract_clause + analyze_clause
+        chains = engine.query_i_have_i_need(NATIVE_TEXT_ID, NATIVE_TEXT_ID, max_depth=3)
+        two_step_chains = [chain for chain in chains if len(chain) == 2]
+        extract_key = f"{LEGAL_TOOLS_ADDRESS}::pkg_test_extract_clause"
+        analyze_key = f"{LEGAL_TOOLS_ADDRESS}::pkg_test_analyze_clause"
+        found_extract_analyze = any(chain[0] == extract_key and chain[1] == analyze_key for chain in two_step_chains)
+        assert found_extract_analyze
+
+    def test_i_have_i_need_no_path(self) -> None:
+        """No path when the desired output is unreachable."""
+        index = make_test_package_index()
+        graph = build_know_how_graph(index)
+        engine = KnowHowQueryEngine(graph)
+
+        # No pipe in the fixture produces this concept, so no chain from Text can reach it
+        nonexistent_concept = ConceptId(
+            package_address="nonexistent",
+            concept_ref="nonexistent.Concept",
+        )
+        chains = engine.query_i_have_i_need(NATIVE_TEXT_ID, nonexistent_concept)
+        assert chains == []
+
+    def test_i_have_i_need_max_depth(self) -> None:
+        """Max depth limits the chain length."""
+        index = make_test_package_index()
+        graph = build_know_how_graph(index)
+        engine = KnowHowQueryEngine(graph)
+
+        chains = engine.query_i_have_i_need(NATIVE_TEXT_ID, NATIVE_TEXT_ID, max_depth=1)
+        # With max_depth=1 only single-step chains are allowed, and no single pipe
+        # both takes Text and outputs Text (analyze_clause takes ContractClause -> Text),
+        # so every returned chain must have at most one step
+        for chain in chains:
+            assert len(chain) <= 1
+
+    def test_i_have_i_need_sorted_shortest_first(self) -> None:
+        """Results are sorted with shortest chains first."""
+        index = make_test_package_index()
+        graph = build_know_how_graph(index)
+        engine = KnowHowQueryEngine(graph)
+
+        chains = engine.query_i_have_i_need(NATIVE_TEXT_ID, NATIVE_TEXT_ID, max_depth=3)
+        for idx in range(len(chains) - 1):
+            assert len(chains[idx]) <= len(chains[idx + 1])
diff --git a/tests/unit/pipelex/core/packages/index/test_index_builder.py b/tests/unit/pipelex/core/packages/index/test_index_builder.py
new file mode 100644
index 000000000..8ecb9dcaf
--- /dev/null
+++ b/tests/unit/pipelex/core/packages/index/test_index_builder.py
@@ -0,0 +1,167 @@
+import shutil
+from pathlib import Path
+
+import pytest
+
+from pipelex.core.packages.exceptions import IndexBuildError
+from pipelex.core.packages.index.index_builder import (
+    build_index_entry_from_package,
+    build_index_from_cache,
+    build_index_from_project,
+)
+
+PACKAGES_DATA_DIR = Path(__file__).resolve().parents[5] / "data" / "packages"
+
+
+class TestIndexBuilder:
+    """Tests for the package index builder."""
+
+    def test_build_entry_from_legal_tools(self) -> None:
+        """Build index entry from legal_tools test package with multi-domain exports."""
+        entry = build_index_entry_from_package(PACKAGES_DATA_DIR / "legal_tools")
+
+        assert entry.address == "github.com/pipelexlab/legal-tools"
+        assert entry.version == "1.0.0"
+        assert entry.description == "Legal document analysis tools"
+        assert entry.display_name == "Legal Tools"
+        assert entry.authors == ["PipelexLab"]
+        assert entry.license == "MIT"
+
+    def test_build_entry_extracts_domains(self) -> None:
+        """Builder discovers all domains from .mthds files."""
+        entry = 
build_index_entry_from_package(PACKAGES_DATA_DIR / "legal_tools") + + domain_codes = {dom.domain_code for dom in entry.domains} + assert "pkg_test_legal.contracts" in domain_codes + assert "pkg_test_scoring" in domain_codes + + def test_build_entry_extracts_concepts(self) -> None: + """Builder extracts concept entries from blueprints.""" + entry = build_index_entry_from_package(PACKAGES_DATA_DIR / "legal_tools") + + concept_codes = {concept.concept_code for concept in entry.concepts} + assert "PkgTestContractClause" in concept_codes + assert "PkgTestScoreResult" in concept_codes + + def test_build_entry_concept_ref_includes_domain(self) -> None: + """Concept entries have domain-qualified concept_ref.""" + entry = build_index_entry_from_package(PACKAGES_DATA_DIR / "legal_tools") + + clause = next(concept for concept in entry.concepts if concept.concept_code == "PkgTestContractClause") + assert clause.concept_ref == "pkg_test_legal.contracts.PkgTestContractClause" + assert clause.domain_code == "pkg_test_legal.contracts" + + def test_build_entry_extracts_pipe_signatures(self) -> None: + """Builder extracts pipe signatures with input/output specs.""" + entry = build_index_entry_from_package(PACKAGES_DATA_DIR / "legal_tools") + + pipe_codes = {pipe.pipe_code for pipe in entry.pipes} + assert "pkg_test_extract_clause" in pipe_codes + assert "pkg_test_analyze_contract" in pipe_codes + assert "pkg_test_compute_weighted_score" in pipe_codes + + def test_build_entry_pipe_input_output_specs(self) -> None: + """Pipe signatures carry input and output concept specs as strings.""" + entry = build_index_entry_from_package(PACKAGES_DATA_DIR / "legal_tools") + + extract = next(pipe for pipe in entry.pipes if pipe.pipe_code == "pkg_test_extract_clause") + assert extract.input_specs == {"text": "Text"} + assert extract.output_spec == "PkgTestContractClause" + assert extract.pipe_type == "PipeLLM" + + def test_build_entry_pipe_export_status(self) -> None: + """Exported pipes are marked, non-exported pipes are not.""" + entry = build_index_entry_from_package(PACKAGES_DATA_DIR / "scoring_dep") + + exported_pipe = next(pipe for pipe in entry.pipes if pipe.pipe_code == "pkg_test_compute_score") + assert exported_pipe.is_exported is True + + internal_pipe = next(pipe for pipe in entry.pipes if pipe.pipe_code == "pkg_test_internal_helper") + assert internal_pipe.is_exported is False + + def test_build_entry_main_pipe_auto_exported(self) -> None: + """main_pipe is auto-exported even if not in exports list.""" + entry = build_index_entry_from_package(PACKAGES_DATA_DIR / "scoring_dep") + + compute = next(pipe for pipe in entry.pipes if pipe.pipe_code == "pkg_test_compute_score") + assert compute.is_exported is True + + def test_build_entry_minimal_package(self) -> None: + """Build index entry from a minimal package with no exports section.""" + entry = build_index_entry_from_package(PACKAGES_DATA_DIR / "minimal_package") + + assert entry.address == "github.com/pipelexlab/minimal" + assert entry.version == "0.1.0" + assert len(entry.pipes) == 1 + # No exports section = all pipes are public + assert entry.pipes[0].is_exported is True + assert entry.pipes[0].pipe_code == "pkg_test_hello" + + def test_build_entry_dependencies_listed(self) -> None: + """Builder extracts dependency addresses from manifest.""" + entry = build_index_entry_from_package(PACKAGES_DATA_DIR / "legal_tools") + assert "github.com/pipelexlab/scoring-lib" in entry.dependencies + + def test_build_entry_dependency_aliases(self) -> None: + """Builder 
populates dependency_aliases mapping alias to address."""
+        entry = build_index_entry_from_package(PACKAGES_DATA_DIR / "legal_tools")
+        assert "scoring_lib" in entry.dependency_aliases
+        assert entry.dependency_aliases["scoring_lib"] == "github.com/pipelexlab/scoring-lib"
+
+    def test_build_entry_dependency_aliases_empty_when_no_deps(self) -> None:
+        """Builder sets empty dependency_aliases when package has no dependencies."""
+        entry = build_index_entry_from_package(PACKAGES_DATA_DIR / "minimal_package")
+        assert entry.dependency_aliases == {}
+
+    def test_build_entry_concept_with_refines(self) -> None:
+        """Builder captures cross-package refines on concepts."""
+        entry = build_index_entry_from_package(PACKAGES_DATA_DIR / "refining_consumer")
+
+        refined = next(concept for concept in entry.concepts if concept.concept_code == "PkgTestRefinedScore")
+        assert refined.refines == "scoring_dep->pkg_test_scoring_dep.PkgTestWeightedScore"
+
+    def test_build_entry_no_manifest_raises(self) -> None:
+        """Building from a directory without METHODS.toml raises IndexBuildError."""
+        with pytest.raises(IndexBuildError, match=r"No METHODS\.toml found"):
+            build_index_entry_from_package(PACKAGES_DATA_DIR / "standalone_bundle")
+
+    def test_build_entry_nonexistent_dir_raises(self) -> None:
+        """Building from a nonexistent directory raises IndexBuildError."""
+        with pytest.raises(IndexBuildError):
+            build_index_entry_from_package(PACKAGES_DATA_DIR / "nonexistent")
+
+    def test_build_index_from_empty_cache(self, tmp_path: Path) -> None:
+        """build_index_from_cache returns empty index for nonexistent cache."""
+        index = build_index_from_cache(cache_root=tmp_path / "no_cache")
+        assert len(index.entries) == 0
+
+    def test_build_index_from_cache_with_packages(self, tmp_path: Path) -> None:
+        """build_index_from_cache discovers packages in the cache layout."""
+        # Set up cache layout: cache_root/address/version/
+        cache_root = tmp_path / "cache"
+        pkg_dir = cache_root / "github.com" / "mthds" / "scoring-lib" / "2.0.0"
+        pkg_dir.mkdir(parents=True)
+        src = PACKAGES_DATA_DIR / "scoring_dep"
+        for item in src.iterdir():
+            if item.is_file():
+                shutil.copy(item, pkg_dir / item.name)
+
+        index = build_index_from_cache(cache_root=cache_root)
+        assert len(index.entries) == 1
+        entry = index.get_entry("github.com/mthds/scoring-lib")
+        assert entry is not None
+        assert entry.version == "2.0.0"
+
+    def test_build_index_from_project(self) -> None:
+        """build_index_from_project indexes the project itself."""
+        index = build_index_from_project(PACKAGES_DATA_DIR / "minimal_package")
+
+        assert len(index.entries) == 1
+        entry = index.get_entry("github.com/pipelexlab/minimal")
+        assert entry is not None
+        assert entry.version == "0.1.0"
+
+    def test_build_index_from_project_no_manifest(self, tmp_path: Path) -> None:
+        """build_index_from_project returns empty index when no manifest exists."""
+        index = build_index_from_project(tmp_path)
+        assert len(index.entries) == 0
diff --git a/tests/unit/pipelex/core/packages/index/test_index_models.py b/tests/unit/pipelex/core/packages/index/test_index_models.py
new file mode 100644
index 000000000..c75fa2bb5
--- /dev/null
+++ b/tests/unit/pipelex/core/packages/index/test_index_models.py
@@ -0,0 +1,218 @@
+from typing import ClassVar
+
+import pytest
+from pydantic import ValidationError
+
+from pipelex.core.packages.index.models import (
+    ConceptEntry,
+    DomainEntry,
+    PackageIndex,
+    PackageIndexEntry,
+    PipeSignature,
+)
+
+
+class TestData:
+    PIPE_SIG: ClassVar[PipeSignature] = 
PipeSignature( + pipe_code="pkg_test_extract", + pipe_type="PipeLLM", + domain_code="pkg_test_legal", + description="Extract clauses", + input_specs={"text": "Text"}, + output_spec="PkgTestContractClause", + is_exported=True, + ) + + CONCEPT_ENTRY: ClassVar[ConceptEntry] = ConceptEntry( + concept_code="PkgTestContractClause", + domain_code="pkg_test_legal", + concept_ref="pkg_test_legal.PkgTestContractClause", + description="A clause from a contract", + ) + + CONCEPT_WITH_REFINES: ClassVar[ConceptEntry] = ConceptEntry( + concept_code="PkgTestRefinedScore", + domain_code="pkg_test_refining", + concept_ref="pkg_test_refining.PkgTestRefinedScore", + description="A refined score", + refines="scoring_dep->pkg_test_scoring_dep.PkgTestWeightedScore", + ) + + CONCEPT_WITH_STRUCTURE: ClassVar[ConceptEntry] = ConceptEntry( + concept_code="PkgTestDetailedScore", + domain_code="pkg_test_scoring", + concept_ref="pkg_test_scoring.PkgTestDetailedScore", + description="A detailed score with fields", + structure_fields=["score_value", "confidence", "explanation"], + ) + + DOMAIN_ENTRY: ClassVar[DomainEntry] = DomainEntry( + domain_code="pkg_test_legal", + description="Legal analysis tools", + ) + + ENTRY: ClassVar[PackageIndexEntry] = PackageIndexEntry( + address="github.com/pipelexlab/legal-tools", + display_name="Legal Tools", + version="1.0.0", + description="Legal document analysis tools", + authors=["PipelexLab"], + license="MIT", + domains=[DomainEntry(domain_code="pkg_test_legal", description="Legal tools")], + concepts=[ + ConceptEntry( + concept_code="PkgTestContractClause", + domain_code="pkg_test_legal", + concept_ref="pkg_test_legal.PkgTestContractClause", + description="A clause from a contract", + ) + ], + pipes=[ + PipeSignature( + pipe_code="pkg_test_extract", + pipe_type="PipeLLM", + domain_code="pkg_test_legal", + description="Extract clauses", + input_specs={"text": "Text"}, + output_spec="PkgTestContractClause", + is_exported=True, + ) + ], + dependencies=["github.com/pipelexlab/scoring-lib"], + dependency_aliases={"scoring_dep": "github.com/pipelexlab/scoring-lib"}, + ) + + ENTRY_B: ClassVar[PackageIndexEntry] = PackageIndexEntry( + address="github.com/pipelexlab/scoring-lib", + version="2.0.0", + description="Scoring library", + pipes=[ + PipeSignature( + pipe_code="pkg_test_score", + pipe_type="PipeLLM", + domain_code="pkg_test_scoring", + description="Score items", + input_specs={"item": "Text"}, + output_spec="PkgTestScoreResult", + is_exported=True, + ) + ], + ) + + +class TestIndexModels: + """Tests for package index data models.""" + + def test_pipe_signature_is_frozen(self) -> None: + """PipeSignature fields cannot be mutated.""" + with pytest.raises(ValidationError): + TestData.PIPE_SIG.pipe_code = "changed" # type: ignore[misc] + + def test_concept_entry_without_refines(self) -> None: + """ConceptEntry can be created without refines or structure_fields.""" + entry = TestData.CONCEPT_ENTRY + assert entry.concept_code == "PkgTestContractClause" + assert entry.refines is None + assert entry.structure_fields == [] + + def test_concept_entry_with_refines(self) -> None: + """ConceptEntry stores cross-package refines references.""" + entry = TestData.CONCEPT_WITH_REFINES + assert entry.refines == "scoring_dep->pkg_test_scoring_dep.PkgTestWeightedScore" + + def test_concept_entry_with_structure_fields(self) -> None: + """ConceptEntry stores structure field names.""" + entry = TestData.CONCEPT_WITH_STRUCTURE + assert entry.structure_fields == ["score_value", "confidence", 
"explanation"] + + def test_domain_entry_with_description(self) -> None: + """DomainEntry stores domain code and optional description.""" + entry = TestData.DOMAIN_ENTRY + assert entry.domain_code == "pkg_test_legal" + assert entry.description == "Legal analysis tools" + + def test_domain_entry_without_description(self) -> None: + """DomainEntry allows None description.""" + entry = DomainEntry(domain_code="pkg_test_minimal", description=None) + assert entry.description is None + + def test_package_index_entry_fields(self) -> None: + """PackageIndexEntry stores all expected metadata.""" + entry = TestData.ENTRY + assert entry.address == "github.com/pipelexlab/legal-tools" + assert entry.display_name == "Legal Tools" + assert entry.version == "1.0.0" + assert entry.description == "Legal document analysis tools" + assert entry.authors == ["PipelexLab"] + assert entry.license == "MIT" + assert len(entry.domains) == 1 + assert len(entry.concepts) == 1 + assert len(entry.pipes) == 1 + assert entry.dependencies == ["github.com/pipelexlab/scoring-lib"] + assert entry.dependency_aliases == {"scoring_dep": "github.com/pipelexlab/scoring-lib"} + + def test_package_index_entry_is_frozen(self) -> None: + """PackageIndexEntry fields cannot be mutated.""" + with pytest.raises(ValidationError): + TestData.ENTRY.version = "2.0.0" # type: ignore[misc] + + def test_pipe_signature_input_output(self) -> None: + """PipeSignature stores input specs and output spec as strings.""" + sig = TestData.PIPE_SIG + assert sig.input_specs == {"text": "Text"} + assert sig.output_spec == "PkgTestContractClause" + + def test_package_index_add_and_get(self) -> None: + """PackageIndex.add_entry stores and get_entry retrieves by address.""" + index = PackageIndex() + index.add_entry(TestData.ENTRY) + result = index.get_entry("github.com/pipelexlab/legal-tools") + assert result is not None + assert result.address == "github.com/pipelexlab/legal-tools" + + def test_package_index_get_nonexistent(self) -> None: + """PackageIndex.get_entry returns None for unknown address.""" + index = PackageIndex() + assert index.get_entry("github.com/nonexistent") is None + + def test_package_index_remove(self) -> None: + """PackageIndex.remove_entry removes and returns True, or False if not found.""" + index = PackageIndex() + index.add_entry(TestData.ENTRY) + assert index.remove_entry("github.com/pipelexlab/legal-tools") is True + assert index.get_entry("github.com/pipelexlab/legal-tools") is None + assert index.remove_entry("github.com/pipelexlab/legal-tools") is False + + def test_package_index_replace_entry(self) -> None: + """PackageIndex.add_entry replaces an existing entry with the same address.""" + index = PackageIndex() + index.add_entry(TestData.ENTRY) + updated = PackageIndexEntry( + address="github.com/pipelexlab/legal-tools", + version="2.0.0", + description="Updated", + ) + index.add_entry(updated) + result = index.get_entry("github.com/pipelexlab/legal-tools") + assert result is not None + assert result.version == "2.0.0" + + def test_package_index_all_concepts(self) -> None: + """PackageIndex.all_concepts returns concepts from all entries.""" + index = PackageIndex() + index.add_entry(TestData.ENTRY) + index.add_entry(TestData.ENTRY_B) + all_concepts = index.all_concepts() + assert len(all_concepts) == 1 # Only ENTRY has a concept + assert all_concepts[0][0] == "github.com/pipelexlab/legal-tools" + assert all_concepts[0][1].concept_code == "PkgTestContractClause" + + def test_package_index_all_pipes(self) -> None: + 
"""PackageIndex.all_pipes returns pipes from all entries.""" + index = PackageIndex() + index.add_entry(TestData.ENTRY) + index.add_entry(TestData.ENTRY_B) + all_pipes = index.all_pipes() + assert len(all_pipes) == 2 + pipe_codes = {pipe.pipe_code for _, pipe in all_pipes} + assert pipe_codes == {"pkg_test_extract", "pkg_test_score"} diff --git a/tests/unit/pipelex/core/packages/test_bundle_scanner.py b/tests/unit/pipelex/core/packages/test_bundle_scanner.py new file mode 100644 index 000000000..b510d54dd --- /dev/null +++ b/tests/unit/pipelex/core/packages/test_bundle_scanner.py @@ -0,0 +1,198 @@ +from pathlib import Path + +import pytest + +from pipelex.core.packages.bundle_scanner import build_domain_exports_from_scan, scan_bundles_for_domain_info + +# Path to the physical test data +PACKAGES_DATA_DIR = Path(__file__).resolve().parent.parent.parent.parent.parent / "data" / "packages" + + +class TestBundleScanner: + """Tests for the shared bundle scanning and domain-exports-building functions.""" + + def test_scan_bundles_extracts_domains_and_pipes(self): + """Scanning multi-domain .mthds files returns correct domain/pipe mappings.""" + mthds_files = sorted(PACKAGES_DATA_DIR.joinpath("legal_tools").rglob("*.mthds")) + assert len(mthds_files) >= 2, "Expected at least two .mthds fixtures" + + domain_pipes, domain_main_pipes, _blueprints, errors = scan_bundles_for_domain_info(mthds_files) + + assert not errors + assert "pkg_test_legal.contracts" in domain_pipes + assert "pkg_test_scoring" in domain_pipes + assert "pkg_test_extract_clause" in domain_pipes["pkg_test_legal.contracts"] + assert "pkg_test_analyze_contract" in domain_pipes["pkg_test_legal.contracts"] + assert "pkg_test_compute_weighted_score" in domain_pipes["pkg_test_scoring"] + assert domain_main_pipes["pkg_test_legal.contracts"] == "pkg_test_extract_clause" + assert domain_main_pipes["pkg_test_scoring"] == "pkg_test_compute_weighted_score" + + def test_scan_bundles_collects_parse_errors(self, tmp_path: Path): + """Files that cannot be parsed are collected as error strings.""" + bad_file = tmp_path / "broken.mthds" + bad_file.write_text("[broken\n", encoding="utf-8") + + _domain_pipes, _domain_main_pipes, _blueprints, errors = scan_bundles_for_domain_info([bad_file]) + + assert len(errors) == 1 + assert str(bad_file) in errors[0] + + def test_scan_bundles_handles_empty_input(self): + """Passing no files returns empty results.""" + domain_pipes, domain_main_pipes, blueprints, errors = scan_bundles_for_domain_info([]) + + assert domain_pipes == {} + assert domain_main_pipes == {} + assert blueprints == [] + assert errors == [] + + def test_build_exports_main_pipe_first(self): + """Main pipe appears first in the exports pipe list, remaining sorted.""" + domain_pipes = { + "alpha": ["zebra_pipe", "alpha_pipe", "main_alpha"], + } + domain_main_pipes = { + "alpha": "main_alpha", + } + + exports = build_domain_exports_from_scan(domain_pipes, domain_main_pipes) + + assert len(exports) == 1 + assert exports[0].domain_path == "alpha" + assert exports[0].pipes[0] == "main_alpha" + assert exports[0].pipes == ["main_alpha", "alpha_pipe", "zebra_pipe"] + + def test_build_exports_skips_empty_domains(self): + """Domains with no pipes produce no exports entry.""" + domain_pipes = { + "has_pipes": ["some_pipe"], + "empty_domain": [], + } + domain_main_pipes: dict[str, str] = {} + + exports = build_domain_exports_from_scan(domain_pipes, domain_main_pipes) + + assert len(exports) == 1 + assert exports[0].domain_path == "has_pipes" + + def 
test_build_exports_sorts_domains(self): + """Domains appear in sorted order in the exports list.""" + domain_pipes = { + "zebra_domain": ["pipe_z"], + "alpha_domain": ["pipe_a"], + } + domain_main_pipes: dict[str, str] = {} + + exports = build_domain_exports_from_scan(domain_pipes, domain_main_pipes) + + assert len(exports) == 2 + assert exports[0].domain_path == "alpha_domain" + assert exports[1].domain_path == "zebra_domain" + + def test_scan_bundles_detects_main_pipe_conflict(self, tmp_path: Path): + """Two bundles sharing a domain but declaring different main_pipe produce an error.""" + bundle_a = tmp_path / "bundle_a.mthds" + bundle_a.write_text( + 'domain = "shared_domain"\n' + 'main_pipe = "pipe_alpha"\n' + "\n" + "[pipe.pipe_alpha]\n" + 'type = "PipeLLM"\n' + 'description = "Alpha"\n' + 'output = "Text"\n' + 'prompt = "alpha"\n', + encoding="utf-8", + ) + bundle_b = tmp_path / "bundle_b.mthds" + bundle_b.write_text( + 'domain = "shared_domain"\n' + 'main_pipe = "pipe_beta"\n' + "\n" + "[pipe.pipe_beta]\n" + 'type = "PipeLLM"\n' + 'description = "Beta"\n' + 'output = "Text"\n' + 'prompt = "beta"\n', + encoding="utf-8", + ) + + _domain_pipes, domain_main_pipes, _blueprints, errors = scan_bundles_for_domain_info( + sorted([bundle_a, bundle_b]), + ) + + assert len(errors) == 1 + assert "shared_domain" in errors[0] + assert "pipe_alpha" in errors[0] + assert "pipe_beta" in errors[0] + assert str(bundle_b) in errors[0] + # First value kept, conflict reported but not overwritten + assert domain_main_pipes["shared_domain"] == "pipe_alpha" + + def test_scan_bundles_allows_identical_main_pipe(self, tmp_path: Path): + """Two bundles declaring the same main_pipe for a domain is not an error.""" + bundle_a = tmp_path / "bundle_a.mthds" + bundle_a.write_text( + 'domain = "shared_domain"\n' + 'main_pipe = "same_pipe"\n' + "\n" + "[pipe.same_pipe]\n" + 'type = "PipeLLM"\n' + 'description = "A"\n' + 'output = "Text"\n' + 'prompt = "a"\n', + encoding="utf-8", + ) + bundle_b = tmp_path / "bundle_b.mthds" + bundle_b.write_text( + 'domain = "shared_domain"\n' + 'main_pipe = "same_pipe"\n' + "\n" + "[pipe.same_pipe]\n" + 'type = "PipeLLM"\n' + 'description = "B copy"\n' + 'output = "Text"\n' + 'prompt = "b"\n', + encoding="utf-8", + ) + + _domain_pipes, domain_main_pipes, _blueprints, errors = scan_bundles_for_domain_info( + sorted([bundle_a, bundle_b]), + ) + + assert not errors + assert domain_main_pipes["shared_domain"] == "same_pipe" + + @pytest.mark.parametrize( + ("topic", "domain_pipes", "domain_main_pipes", "expected_first_pipe"), + [ + ( + "main_pipe present and also in pipe list", + {"dom": ["other", "main_p"]}, + {"dom": "main_p"}, + "main_p", + ), + ( + "main_pipe not in pipe list", + {"dom": ["other"]}, + {"dom": "main_p"}, + "main_p", + ), + ( + "no main_pipe", + {"dom": ["beta", "alpha"]}, + {}, + "alpha", + ), + ], + ) + def test_build_exports_main_pipe_ordering( + self, + topic: str, + domain_pipes: dict[str, list[str]], + domain_main_pipes: dict[str, str], + expected_first_pipe: str, + ): + """Main pipe ordering scenarios.""" + _ = topic # Used for test identification + exports = build_domain_exports_from_scan(domain_pipes, domain_main_pipes) + assert exports[0].pipes[0] == expected_first_pipe diff --git a/tests/unit/pipelex/core/packages/test_concept_validation_cross_package.py b/tests/unit/pipelex/core/packages/test_concept_validation_cross_package.py new file mode 100644 index 000000000..2c84e1fe7 --- /dev/null +++ 
b/tests/unit/pipelex/core/packages/test_concept_validation_cross_package.py @@ -0,0 +1,35 @@ +from pipelex.core.concepts.validation import is_concept_ref_or_code_valid, is_concept_ref_valid + + +class TestConceptValidationCrossPackage: + """Tests for cross-package concept reference validation.""" + + def test_cross_package_concept_ref_is_valid(self): + """Cross-package concept ref 'alias->domain.Code' should be valid.""" + assert is_concept_ref_valid("scoring_lib->scoring.WeightedScore") is True + + def test_cross_package_concept_ref_hierarchical_domain(self): + """Cross-package concept ref with hierarchical domain is valid.""" + assert is_concept_ref_valid("my_lib->legal.contracts.NonCompeteClause") is True + + def test_cross_package_concept_ref_invalid_concept_code(self): + """Cross-package concept ref with invalid concept code is invalid.""" + assert is_concept_ref_valid("my_lib->scoring.bad_code") is False + + def test_cross_package_concept_ref_no_domain(self): + """Cross-package concept ref without domain is invalid (bare code after ->).""" + assert is_concept_ref_valid("my_lib->WeightedScore") is False + + def test_cross_package_concept_ref_or_code_is_valid(self): + """Cross-package refs pass is_concept_ref_or_code_valid.""" + assert is_concept_ref_or_code_valid("scoring_lib->scoring.WeightedScore") is True + + def test_cross_package_concept_ref_or_code_bare_code(self): + """Cross-package ref with bare code after -> (no domain) passes if code is PascalCase.""" + # "alias->Code" has no dot in remainder, so it's treated as a bare code + assert is_concept_ref_or_code_valid("my_lib->WeightedScore") is True + + def test_regular_concept_ref_still_valid(self): + """Regular concept refs still work.""" + assert is_concept_ref_valid("scoring.WeightedScore") is True + assert is_concept_ref_or_code_valid("WeightedScore") is True diff --git a/tests/unit/pipelex/core/packages/test_cross_package_loading.py b/tests/unit/pipelex/core/packages/test_cross_package_loading.py new file mode 100644 index 000000000..1ec57d1e2 --- /dev/null +++ b/tests/unit/pipelex/core/packages/test_cross_package_loading.py @@ -0,0 +1,130 @@ +import pytest +from pytest_mock import MockerFixture + +from pipelex.core.concepts.concept import Concept +from pipelex.core.concepts.concept_factory import ConceptFactory, DomainAndConceptCode +from pipelex.core.concepts.exceptions import ConceptFactoryError +from pipelex.core.qualified_ref import QualifiedRef +from pipelex.libraries.concept.concept_library import ConceptLibrary +from pipelex.libraries.concept.exceptions import ConceptLibraryError +from pipelex.libraries.pipe.exceptions import PipeLibraryError +from pipelex.libraries.pipe.pipe_library import PipeLibrary + + +def _make_stub_concept(code: str, domain_code: str) -> Concept: + """Create a minimal Concept for testing.""" + return Concept( + code=code, + domain_code=domain_code, + description="Test concept", + structure_class_name="TextContent", + ) + + +class TestCrossPackageLoading: + """Tests for cross-package pipe and concept loading/lookup.""" + + def test_pipe_library_add_dependency_pipe(self, mocker: MockerFixture): + """add_dependency_pipe() stores pipe with aliased key.""" + library = PipeLibrary.make_empty() + mock_pipe = mocker.MagicMock() + mock_pipe.code = "compute_score" + library.add_dependency_pipe(alias="scoring_lib", pipe=mock_pipe) + assert "scoring_lib->compute_score" in library.root + + def test_pipe_library_get_optional_cross_package_ref(self, mocker: MockerFixture): + """get_optional_pipe() resolves 
'alias->domain.pipe_code' to 'alias->pipe_code'.""" + library = PipeLibrary.make_empty() + mock_pipe = mocker.MagicMock() + mock_pipe.code = "compute_score" + mock_pipe.domain_code = "scoring" + library.add_dependency_pipe(alias="scoring_lib", pipe=mock_pipe) + + result = library.get_optional_pipe("scoring_lib->scoring.compute_score") + assert result is not None + assert result.code == "compute_score" + + def test_pipe_library_get_optional_cross_package_direct_key(self, mocker: MockerFixture): + """get_optional_pipe() resolves direct 'alias->pipe_code' key.""" + library = PipeLibrary.make_empty() + mock_pipe = mocker.MagicMock() + mock_pipe.code = "compute_score" + library.add_dependency_pipe(alias="scoring_lib", pipe=mock_pipe) + + result = library.get_optional_pipe("scoring_lib->compute_score") + assert result is not None + assert result.code == "compute_score" + + def test_pipe_library_duplicate_dependency_pipe_raises(self, mocker: MockerFixture): + """add_dependency_pipe() raises on duplicate.""" + library = PipeLibrary.make_empty() + mock_pipe = mocker.MagicMock() + mock_pipe.code = "compute_score" + library.add_dependency_pipe(alias="scoring_lib", pipe=mock_pipe) + with pytest.raises(PipeLibraryError, match="already exists"): + library.add_dependency_pipe(alias="scoring_lib", pipe=mock_pipe) + + def test_concept_library_add_dependency_concept(self): + """add_dependency_concept() stores concept with aliased key.""" + library = ConceptLibrary.make_empty() + concept = _make_stub_concept(code="WeightedScore", domain_code="scoring") + library.add_dependency_concept(alias="scoring_lib", concept=concept) + assert "scoring_lib->scoring.WeightedScore" in library.root + + def test_concept_library_get_required_cross_package_ref(self): + """get_required_concept() resolves cross-package refs.""" + library = ConceptLibrary.make_empty() + concept = _make_stub_concept(code="WeightedScore", domain_code="scoring") + library.add_dependency_concept(alias="scoring_lib", concept=concept) + + result = library.get_required_concept("scoring_lib->scoring.WeightedScore") + assert result.code == "WeightedScore" + + def test_concept_library_cross_package_not_found(self): + """get_required_concept() raises when cross-package concept not loaded.""" + library = ConceptLibrary.make_empty() + with pytest.raises(ConceptLibraryError, match="not found"): + library.get_required_concept("unknown_lib->domain.Missing") + + def test_concept_library_duplicate_dependency_concept_raises(self): + """add_dependency_concept() raises on duplicate aliased key.""" + library = ConceptLibrary.make_empty() + concept = _make_stub_concept(code="WeightedScore", domain_code="scoring") + library.add_dependency_concept(alias="scoring_lib", concept=concept) + with pytest.raises(ConceptLibraryError, match="already exists"): + library.add_dependency_concept(alias="scoring_lib", concept=concept) + + def test_concept_factory_cross_package_domain_and_code(self): + """ConceptFactory resolves cross-package refs to aliased domain codes.""" + result = ConceptFactory.make_domain_and_concept_code_from_concept_ref_or_code( + concept_ref_or_code="scoring_lib->scoring.WeightedScore", + ) + assert isinstance(result, DomainAndConceptCode) + assert result.domain_code == "scoring_lib->scoring" + assert result.concept_code == "WeightedScore" + + def test_concept_factory_cross_package_requires_domain(self): + """Cross-package concept ref without domain raises error.""" + with pytest.raises(ConceptFactoryError, match="must include a domain"): + 
ConceptFactory.make_domain_and_concept_code_from_concept_ref_or_code( + concept_ref_or_code="scoring_lib->WeightedScore", + ) + + def test_concept_factory_make_refine_cross_package(self): + """make_refine() passes through cross-package refs unchanged.""" + result = ConceptFactory.make_refine( + refine="scoring_lib->scoring.BaseScore", + domain_code="my_domain", + ) + assert result == "scoring_lib->scoring.BaseScore" + + def test_qualified_ref_has_cross_package_prefix(self): + """QualifiedRef.has_cross_package_prefix detects '->' syntax.""" + assert QualifiedRef.has_cross_package_prefix("lib->domain.pipe") is True + assert QualifiedRef.has_cross_package_prefix("domain.pipe") is False + + def test_qualified_ref_split_cross_package_ref(self): + """QualifiedRef.split_cross_package_ref splits correctly.""" + alias, remainder = QualifiedRef.split_cross_package_ref("scoring_lib->scoring.compute_score") + assert alias == "scoring_lib" + assert remainder == "scoring.compute_score" diff --git a/tests/unit/pipelex/core/packages/test_cross_package_refs.py b/tests/unit/pipelex/core/packages/test_cross_package_refs.py new file mode 100644 index 000000000..33a9a79d6 --- /dev/null +++ b/tests/unit/pipelex/core/packages/test_cross_package_refs.py @@ -0,0 +1,129 @@ +from pipelex.core.bundles.pipelex_bundle_blueprint import PipelexBundleBlueprint +from pipelex.core.packages.manifest import MthdsPackageManifest, PackageDependency +from pipelex.core.packages.visibility import PackageVisibilityChecker, check_visibility_for_blueprints +from pipelex.core.qualified_ref import QualifiedRef +from pipelex.pipe_controllers.sequence.pipe_sequence_blueprint import PipeSequenceBlueprint +from pipelex.pipe_controllers.sub_pipe_blueprint import SubPipeBlueprint + + +class TestCrossPackageRefs: + """Tests for cross-package '->' reference detection.""" + + def test_has_cross_package_prefix(self): + """Detect '->' in raw reference strings.""" + assert QualifiedRef.has_cross_package_prefix("my_lib->scoring.compute") is True + assert QualifiedRef.has_cross_package_prefix("scoring.compute") is False + assert QualifiedRef.has_cross_package_prefix("compute") is False + + def test_split_cross_package_ref(self): + """Split 'alias->domain.pipe' correctly.""" + alias, remainder = QualifiedRef.split_cross_package_ref("my_lib->scoring.compute") + assert alias == "my_lib" + assert remainder == "scoring.compute" + + def test_known_alias_no_error(self): + """Cross-package ref with alias in dependencies -> info emitted, no error.""" + manifest = MthdsPackageManifest( + address="github.com/org/test", + version="1.0.0", + description="Test package", + dependencies=[ + PackageDependency( + address="github.com/org/scoring-lib", + version="1.0.0", + alias="scoring_lib", + ), + ], + ) + bundle = PipelexBundleBlueprint( + domain="my_domain", + pipe={ + "my_pipe": PipeSequenceBlueprint( + type="PipeSequence", + description="Test", + output="Text", + steps=[ + SubPipeBlueprint(pipe="scoring_lib->scoring.compute_score"), + ], + ), + }, + ) + checker = PackageVisibilityChecker(manifest=manifest, bundles=[bundle]) + errors = checker.validate_cross_package_references() + # Known alias -> no error (only info emitted via log) + assert errors == [] + + def test_unknown_alias_produces_error(self): + """Cross-package ref with alias NOT in dependencies -> error.""" + manifest = MthdsPackageManifest( + address="github.com/org/test", + version="1.0.0", + description="Test package", + ) + bundle = PipelexBundleBlueprint( + domain="my_domain", + pipe={ + 
"my_pipe": PipeSequenceBlueprint( + type="PipeSequence", + description="Test", + output="Text", + steps=[ + SubPipeBlueprint(pipe="unknown_lib->scoring.compute_score"), + ], + ), + }, + ) + checker = PackageVisibilityChecker(manifest=manifest, bundles=[bundle]) + errors = checker.validate_cross_package_references() + assert len(errors) == 1 + assert "unknown_lib" in errors[0].message + assert "[dependencies]" in errors[0].message + + def test_no_cross_package_refs_no_warnings(self): + """No '->' refs at all -> no warnings or errors.""" + manifest = MthdsPackageManifest( + address="github.com/org/test", + version="1.0.0", + description="Test package", + ) + bundle = PipelexBundleBlueprint( + domain="my_domain", + pipe={ + "my_pipe": PipeSequenceBlueprint( + type="PipeSequence", + description="Test", + output="Text", + steps=[ + SubPipeBlueprint(pipe="scoring.compute_score"), + ], + ), + }, + ) + checker = PackageVisibilityChecker(manifest=manifest, bundles=[bundle]) + errors = checker.validate_cross_package_references() + assert errors == [] + + def test_check_visibility_includes_cross_package_validation(self): + """check_visibility_for_blueprints() validates both intra-package and cross-package refs.""" + manifest = MthdsPackageManifest( + address="github.com/org/test", + version="1.0.0", + description="Test package", + ) + bundle = PipelexBundleBlueprint( + domain="my_domain", + pipe={ + "my_pipe": PipeSequenceBlueprint( + type="PipeSequence", + description="Test", + output="Text", + steps=[ + SubPipeBlueprint(pipe="unknown_dep->scoring.compute_score"), + ], + ), + }, + ) + # The convenience function should now include cross-package validation + errors = check_visibility_for_blueprints(manifest=manifest, blueprints=[bundle]) + unknown_alias_errors = [err for err in errors if "unknown_dep" in err.message] + assert len(unknown_alias_errors) >= 1 diff --git a/tests/unit/pipelex/core/packages/test_data.py b/tests/unit/pipelex/core/packages/test_data.py new file mode 100644 index 000000000..5162eec9c --- /dev/null +++ b/tests/unit/pipelex/core/packages/test_data.py @@ -0,0 +1,195 @@ +from typing import ClassVar + +from pipelex.core.packages.manifest import DomainExports, MthdsPackageManifest, PackageDependency + +# ============================================================ +# TOML strings for parser tests +# ============================================================ + +FULL_MANIFEST_TOML = """\ +[package] +address = "github.com/pipelexlab/legal-tools" +display_name = "Legal Tools" +version = "1.0.0" +description = "Legal document analysis tools" +authors = ["PipelexLab"] +license = "MIT" +mthds_version = "0.5.0" + +[dependencies] +scoring_lib = { address = "github.com/pipelexlab/scoring-lib", version = "2.0.0" } + +[exports.legal.contracts] +pipes = ["extract_clause", "analyze_contract"] + +[exports.scoring] +pipes = ["compute_weighted_score"] +""" + +MINIMAL_MANIFEST_TOML = """\ +[package] +address = "github.com/pipelexlab/minimal" +version = "0.1.0" +description = "A minimal MTHDS package" +""" + +EMPTY_EXPORTS_DEPS_TOML = """\ +[package] +address = "github.com/pipelexlab/empty" +version = "1.0.0" +description = "Package with empty exports and dependencies" + +[dependencies] + +[exports] +""" + +MULTI_LEVEL_EXPORTS_TOML = """\ +[package] +address = "github.com/pipelexlab/deep" +version = "1.0.0" +description = "Deep nested exports package" + +[exports.legal.contracts.shareholder] +pipes = ["extract_shareholder_clause"] + +[exports.legal.contracts] +pipes = ["extract_clause"] + 
+[exports.scoring] +pipes = ["compute_score"] +""" + +INVALID_TOML_SYNTAX = """\ +[package +address = "broken +""" + +MISSING_PACKAGE_SECTION_TOML = """\ +[something_else] +foo = "bar" +""" + +MISSING_REQUIRED_FIELDS_TOML = """\ +[package] +description = "Missing address and version" +""" + +NON_TABLE_DEPENDENCY_TOML = """\ +[package] +address = "github.com/pipelexlab/bad-deps" +version = "1.0.0" +description = "Package with a non-table dependency entry" + +[dependencies] +foo = "1.0.0" +""" + +INVALID_DOMAIN_PATH_EXPORTS_TOML = """\ +[package] +address = "github.com/pipelexlab/bad-exports" +version = "1.0.0" +description = "Package with an invalid domain path in exports" + +[exports.InvalidDomain] +pipes = ["extract_clause"] +""" + +INVALID_PIPE_NAME_EXPORTS_TOML = """\ +[package] +address = "github.com/pipelexlab/bad-pipes" +version = "1.0.0" +description = "Package with an invalid pipe name in exports" + +[exports.legal] +pipes = ["BadPipe"] +""" + +# ============================================================ +# Expected model instances +# ============================================================ + + +class ManifestTestData: + """Reusable expected manifest instances for test assertions.""" + + FULL_MANIFEST: ClassVar[MthdsPackageManifest] = MthdsPackageManifest( + address="github.com/pipelexlab/legal-tools", + display_name="Legal Tools", + version="1.0.0", + description="Legal document analysis tools", + authors=["PipelexLab"], + license="MIT", + mthds_version="0.5.0", + dependencies=[ + PackageDependency( + address="github.com/pipelexlab/scoring-lib", + version="2.0.0", + alias="scoring_lib", + ), + ], + exports=[ + DomainExports(domain_path="legal.contracts", pipes=["extract_clause", "analyze_contract"]), + DomainExports(domain_path="scoring", pipes=["compute_weighted_score"]), + ], + ) + + MINIMAL_MANIFEST: ClassVar[MthdsPackageManifest] = MthdsPackageManifest( + address="github.com/pipelexlab/minimal", + version="0.1.0", + description="A minimal MTHDS package", + ) + + +# ============================================================ +# Lock file TOML strings for lock file tests +# ============================================================ + +LOCK_FILE_TOML = """\ +["github.com/pipelexlab/document-processing"] +version = "1.2.3" +hash = "sha256:a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2" +source = "https://github.com/pipelexlab/document-processing" + +["github.com/pipelexlab/scoring-lib"] +version = "0.5.1" +hash = "sha256:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" +source = "https://github.com/pipelexlab/scoring-lib" +""" + +EMPTY_LOCK_FILE_TOML = "" + +RESERVED_DOMAIN_EXPORTS_TOML = """\ +[package] +address = "github.com/pipelexlab/reserved-domain" +version = "1.0.0" +description = "Package with a reserved domain in exports" + +[exports.native] +pipes = ["some_pipe"] +""" + +NON_LIST_PIPES_EXPORTS_TOML = """\ +[package] +address = "github.com/pipelexlab/bad-pipes-type" +version = "1.0.0" +description = "Package with a string instead of list for pipes" + +[exports.legal] +pipes = "single_pipe" +""" + +UNKNOWN_PACKAGE_KEYS_TOML = """\ +[package] +address = "github.com/pipelexlab/unknown-keys" +version = "1.0.0" +description = "Package with unknown keys" +homepage = "https://example.com" +""" + +INVALID_HASH_LOCK_FILE_TOML = """\ +["github.com/pipelexlab/bad-hash"] +version = "1.0.0" +hash = "md5:not-a-valid-hash" +source = "https://github.com/pipelexlab/bad-hash" +""" diff --git 
a/tests/unit/pipelex/core/packages/test_dependency_resolver.py b/tests/unit/pipelex/core/packages/test_dependency_resolver.py new file mode 100644 index 000000000..042a72d92 --- /dev/null +++ b/tests/unit/pipelex/core/packages/test_dependency_resolver.py @@ -0,0 +1,116 @@ +from pathlib import Path + +import pytest + +from pipelex.core.packages.dependency_resolver import ResolvedDependency, resolve_local_dependencies +from pipelex.core.packages.exceptions import DependencyResolveError +from pipelex.core.packages.manifest import MthdsPackageManifest, PackageDependency + +PACKAGES_DIR = Path(__file__).resolve().parents[4] / "data" / "packages" + + +class TestDependencyResolver: + """Tests for local dependency resolution.""" + + def test_resolve_local_path_dependency(self): + """Resolve a dependency with a valid local path.""" + manifest = MthdsPackageManifest( + address="github.com/mthds/consumer-app", + version="1.0.0", + description="Consumer", + dependencies=[ + PackageDependency( + address="github.com/mthds/scoring-lib", + version="2.0.0", + alias="scoring_dep", + path="../scoring_dep", + ), + ], + ) + package_root = PACKAGES_DIR / "consumer_package" + resolved = resolve_local_dependencies(manifest=manifest, package_root=package_root) + + assert len(resolved) == 1 + dep = resolved[0] + assert dep.alias == "scoring_dep" + assert dep.package_root == (PACKAGES_DIR / "scoring_dep").resolve() + assert len(dep.mthds_files) >= 1 + # The scoring_dep has exports, so exported_pipe_codes should be populated + assert dep.exported_pipe_codes is not None + assert "pkg_test_compute_score" in dep.exported_pipe_codes + + def test_dependency_without_path_is_skipped(self): + """Dependencies without a path field are skipped.""" + manifest = MthdsPackageManifest( + address="github.com/mthds/consumer-app", + version="1.0.0", + description="Consumer", + dependencies=[ + PackageDependency( + address="github.com/mthds/scoring-lib", + version="2.0.0", + alias="scoring_dep", + # No path field + ), + ], + ) + package_root = PACKAGES_DIR / "consumer_package" + resolved = resolve_local_dependencies(manifest=manifest, package_root=package_root) + + assert len(resolved) == 0 + + def test_nonexistent_path_raises_error(self): + """A dependency pointing to a non-existent path raises DependencyResolveError.""" + manifest = MthdsPackageManifest( + address="github.com/mthds/consumer-app", + version="1.0.0", + description="Consumer", + dependencies=[ + PackageDependency( + address="github.com/mthds/scoring-lib", + version="2.0.0", + alias="scoring_dep", + path="../nonexistent_dir", + ), + ], + ) + package_root = PACKAGES_DIR / "consumer_package" + with pytest.raises(DependencyResolveError, match="does not exist"): + resolve_local_dependencies(manifest=manifest, package_root=package_root) + + def test_dependency_without_manifest_has_no_exports(self): + """A dependency directory without METHODS.toml -> None exported_pipe_codes (all public).""" + manifest = MthdsPackageManifest( + address="github.com/mthds/consumer-app", + version="1.0.0", + description="Consumer", + dependencies=[ + PackageDependency( + address="github.com/mthds/standalone", + version="1.0.0", + alias="standalone", + path="../standalone_bundle", + ), + ], + ) + package_root = PACKAGES_DIR / "consumer_package" + resolved = resolve_local_dependencies(manifest=manifest, package_root=package_root) + + assert len(resolved) == 1 + dep = resolved[0] + assert dep.alias == "standalone" + assert dep.manifest is None + # No manifest = None exports = all public + assert 
dep.exported_pipe_codes is None
+
+    def test_resolved_dependency_is_frozen(self, tmp_path: Path):
+        """ResolvedDependency should be immutable (frozen model)."""
+        dep = ResolvedDependency(
+            alias="test",
+            address="github.com/test/test",
+            manifest=None,
+            package_root=tmp_path / "test",
+            mthds_files=[],
+            exported_pipe_codes=None,
+        )
+        assert dep.alias == "test"
diff --git a/tests/unit/pipelex/core/packages/test_discovery.py b/tests/unit/pipelex/core/packages/test_discovery.py
new file mode 100644
index 000000000..562874781
--- /dev/null
+++ b/tests/unit/pipelex/core/packages/test_discovery.py
@@ -0,0 +1,78 @@
+from pathlib import Path
+
+import pytest
+
+from pipelex.core.packages.discovery import MANIFEST_FILENAME, find_package_manifest
+from pipelex.core.packages.exceptions import ManifestParseError
+
+# Path to the physical test data
+PACKAGES_DATA_DIR = Path(__file__).resolve().parents[4] / "data" / "packages"
+
+
+class TestManifestDiscovery:
+    """Tests for METHODS.toml walk-up discovery."""
+
+    def test_find_manifest_from_bundle_in_subdir(self):
+        """Find METHODS.toml from a bundle path like legal/contracts.mthds."""
+        bundle_path = PACKAGES_DATA_DIR / "legal_tools" / "legal" / "contracts.mthds"
+        manifest = find_package_manifest(bundle_path)
+        assert manifest is not None
+        assert manifest.address == "github.com/pipelexlab/legal-tools"
+        assert manifest.version == "1.0.0"
+
+    def test_find_manifest_from_bundle_in_same_dir(self):
+        """Find METHODS.toml when bundle is in the same directory as manifest."""
+        bundle_path = PACKAGES_DATA_DIR / "minimal_package" / "core.mthds"
+        manifest = find_package_manifest(bundle_path)
+        assert manifest is not None
+        assert manifest.address == "github.com/pipelexlab/minimal"
+
+    def test_standalone_bundle_no_manifest(self):
+        """Standalone bundle with no METHODS.toml returns None."""
+        bundle_path = PACKAGES_DATA_DIR / "standalone_bundle" / "my_pipe.mthds"
+        # This will walk up until it finds the repo's .git directory
+        manifest = find_package_manifest(bundle_path)
+        assert manifest is None
+
+    def test_git_boundary_stops_search(self, tmp_path: Path):
+        """Discovery stops at .git/ directory boundary."""
+        # Create structure: tmp_path/METHODS.toml (above git boundary)
+        #                   tmp_path/project/.git/
+        #                   tmp_path/project/bundle.mthds
+        project_dir = tmp_path / "project"
+        project_dir.mkdir()
+        (project_dir / ".git").mkdir()
+        bundle_path = project_dir / "bundle.mthds"
+        bundle_path.touch()
+
+        # Put a METHODS.toml above the .git boundary (should NOT be found)
+        manifest_content = '[package]\naddress = "github.com/org/above-git"\nversion = "1.0.0"\ndescription = "Above git"\n'
+        (tmp_path / MANIFEST_FILENAME).write_text(manifest_content)
+
+        result = find_package_manifest(bundle_path)
+        assert result is None
+
+    def test_manifest_in_parent_found(self, tmp_path: Path):
+        """METHODS.toml two levels up from bundle is found."""
+        # tmp_path/METHODS.toml
+        # tmp_path/sub/deep/bundle.mthds
+        manifest_content = '[package]\naddress = "github.com/org/deep"\nversion = "2.0.0"\ndescription = "Deep package"\n'
+        (tmp_path / MANIFEST_FILENAME).write_text(manifest_content)
+        deep_dir = tmp_path / "sub" / "deep"
+        deep_dir.mkdir(parents=True)
+        bundle_path = deep_dir / "bundle.mthds"
+        bundle_path.touch()
+
+        result = find_package_manifest(bundle_path)
+        assert result is not None
+        assert result.address == "github.com/org/deep"
+        assert result.version == "2.0.0"
+
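The discovery contract exercised above is the classic walk-up loop: start at the bundle's directory, look for METHODS.toml, and climb one level at a time, treating a .git/ directory as a hard repository boundary and the filesystem root as the end of the search. A minimal sketch of that loop, assuming only pathlib; find_manifest_path is a hypothetical stand-in for the real find_package_manifest, which additionally parses the manifest it finds:

from pathlib import Path

MANIFEST_NAME = "METHODS.toml"


def find_manifest_path(bundle_path: Path) -> Path | None:
    # Walk from the bundle's directory toward the filesystem root.
    current = bundle_path.resolve().parent
    while True:
        candidate = current / MANIFEST_NAME
        if candidate.is_file():
            return candidate  # nearest manifest wins
        if (current / ".git").exists():
            return None  # repo boundary: never inherit a manifest from outside the repo
        if current.parent == current:
            return None  # reached the filesystem root
        current = current.parent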
+    def test_malformed_manifest_raises(self, tmp_path: Path):
+        """Malformed METHODS.toml raises ManifestParseError."""
+        (tmp_path / MANIFEST_FILENAME).write_text("[broken\n")
+        bundle_path = tmp_path / "bundle.mthds"
+        bundle_path.touch()
+
+        with pytest.raises(ManifestParseError):
+            find_package_manifest(bundle_path)
diff --git a/tests/unit/pipelex/core/packages/test_lock_file.py b/tests/unit/pipelex/core/packages/test_lock_file.py
new file mode 100644
index 000000000..d72a96a21
--- /dev/null
+++ b/tests/unit/pipelex/core/packages/test_lock_file.py
@@ -0,0 +1,345 @@
+from pathlib import Path
+
+import pytest
+from pydantic import ValidationError
+
+from pipelex.core.packages.dependency_resolver import ResolvedDependency
+from pipelex.core.packages.exceptions import IntegrityError, LockFileError
+from pipelex.core.packages.lock_file import (
+    LockedPackage,
+    LockFile,
+    compute_directory_hash,
+    generate_lock_file,
+    parse_lock_file,
+    serialize_lock_file,
+    verify_locked_package,
+)
+from pipelex.core.packages.manifest import MthdsPackageManifest, PackageDependency
+from pipelex.core.packages.package_cache import store_in_cache
+from tests.unit.pipelex.core.packages.test_data import (
+    EMPTY_LOCK_FILE_TOML,
+    INVALID_HASH_LOCK_FILE_TOML,
+    LOCK_FILE_TOML,
+)
+
+
+class TestLockFile:
+    """Tests for lock file models, parsing, serialization, hashing, and verification."""
+
+    # ----------------------------------------------------------------
+    # Parsing
+    # ----------------------------------------------------------------
+
+    def test_parse_lock_file(self):
+        """Parse a 2-entry TOML, assert addresses/versions/hashes/sources."""
+        lock = parse_lock_file(LOCK_FILE_TOML)
+        assert len(lock.packages) == 2
+
+        doc_pkg = lock.packages["github.com/pipelexlab/document-processing"]
+        assert doc_pkg.version == "1.2.3"
+        assert doc_pkg.hash == "sha256:a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2"
+        assert doc_pkg.source == "https://github.com/pipelexlab/document-processing"
+
+        scoring_pkg = lock.packages["github.com/pipelexlab/scoring-lib"]
+        assert scoring_pkg.version == "0.5.1"
+        assert scoring_pkg.hash == "sha256:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"
+        assert scoring_pkg.source == "https://github.com/pipelexlab/scoring-lib"
+
+    def test_parse_empty_lock_file(self):
+        """Empty content produces an empty LockFile."""
+        lock = parse_lock_file(EMPTY_LOCK_FILE_TOML)
+        assert lock.packages == {}
+
+    def test_parse_invalid_toml_raises(self):
+        """Bad TOML syntax raises LockFileError."""
+        with pytest.raises(LockFileError, match="Invalid TOML syntax"):
+            parse_lock_file('[broken\nversion = "oops"')
+
+    def test_parse_invalid_hash_raises(self):
+        """Wrong hash prefix raises LockFileError."""
+        with pytest.raises(LockFileError, match="Invalid lock file entry"):
+            parse_lock_file(INVALID_HASH_LOCK_FILE_TOML)
+
+    # ----------------------------------------------------------------
+    # Serialization
+    # ----------------------------------------------------------------
+
+    def test_serialize_lock_file(self):
+        """Serialize a model and assert TOML structure."""
+        lock = LockFile(
+            packages={
+                "github.com/org/repo": LockedPackage(
+                    version="1.0.0",
+                    hash="sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+                    source="https://github.com/org/repo",
+                ),
+            }
+        )
+        toml_str = serialize_lock_file(lock)
+        assert '["github.com/org/repo"]' in toml_str
+        assert 'version = "1.0.0"' in toml_str
+        assert "sha256:aaa" in toml_str
+        assert 'source = "https://github.com/org/repo"' in toml_str
+
+    def test_serialize_roundtrip(self):
"""Parse -> serialize -> parse yields the same model.""" + original = parse_lock_file(LOCK_FILE_TOML) + toml_str = serialize_lock_file(original) + roundtripped = parse_lock_file(toml_str) + assert roundtripped.packages.keys() == original.packages.keys() + for address in original.packages: + assert roundtripped.packages[address].version == original.packages[address].version + assert roundtripped.packages[address].hash == original.packages[address].hash + assert roundtripped.packages[address].source == original.packages[address].source + + def test_serialize_deterministic_order(self): + """Entries are sorted by address regardless of insertion order.""" + lock = LockFile( + packages={ + "github.com/zzz/last": LockedPackage( + version="2.0.0", + hash="sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + source="https://github.com/zzz/last", + ), + "github.com/aaa/first": LockedPackage( + version="1.0.0", + hash="sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + source="https://github.com/aaa/first", + ), + } + ) + toml_str = serialize_lock_file(lock) + aaa_pos = toml_str.index("aaa/first") + zzz_pos = toml_str.index("zzz/last") + assert aaa_pos < zzz_pos + + # ---------------------------------------------------------------- + # Hash computation + # ---------------------------------------------------------------- + + def test_compute_directory_hash_deterministic(self, tmp_path: Path): + """Same directory hashed twice yields the same result.""" + pkg_dir = tmp_path / "pkg" + pkg_dir.mkdir() + (pkg_dir / "file.txt").write_text("hello") + hash_one = compute_directory_hash(pkg_dir) + hash_two = compute_directory_hash(pkg_dir) + assert hash_one == hash_two + assert hash_one.startswith("sha256:") + assert len(hash_one) == len("sha256:") + 64 + + def test_compute_directory_hash_content_sensitive(self, tmp_path: Path): + """Changed content produces a different hash.""" + dir_a = tmp_path / "dir_a" + dir_a.mkdir() + (dir_a / "file.txt").write_text("content A") + + dir_b = tmp_path / "dir_b" + dir_b.mkdir() + (dir_b / "file.txt").write_text("content B") + + assert compute_directory_hash(dir_a) != compute_directory_hash(dir_b) + + def test_compute_directory_hash_path_sensitive(self, tmp_path: Path): + """Same content but different filename produces a different hash.""" + dir_a = tmp_path / "dir_a" + dir_a.mkdir() + (dir_a / "alpha.txt").write_text("same") + + dir_b = tmp_path / "dir_b" + dir_b.mkdir() + (dir_b / "beta.txt").write_text("same") + + assert compute_directory_hash(dir_a) != compute_directory_hash(dir_b) + + def test_compute_directory_hash_skips_git_dir(self, tmp_path: Path): + """Files inside .git/ are excluded from the hash.""" + pkg_dir = tmp_path / "pkg" + pkg_dir.mkdir() + (pkg_dir / "file.txt").write_text("hello") + + hash_without_git = compute_directory_hash(pkg_dir) + + # Add .git/ contents + git_dir = pkg_dir / ".git" + git_dir.mkdir() + (git_dir / "HEAD").write_text("ref: refs/heads/main\n") + (git_dir / "config").write_text("[core]\n") + + hash_with_git = compute_directory_hash(pkg_dir) + assert hash_without_git == hash_with_git + + def test_compute_directory_hash_nonexistent_raises(self, tmp_path: Path): + """Non-existent directory raises LockFileError.""" + with pytest.raises(LockFileError, match="does not exist"): + compute_directory_hash(tmp_path / "nonexistent") + + # ---------------------------------------------------------------- + # Verification + # ---------------------------------------------------------------- + + def 
test_verify_locked_package_success(self, tmp_path: Path): + """Build + verify matching hash passes without error.""" + cache_root = tmp_path / "cache" + source_dir = tmp_path / "source" + source_dir.mkdir() + (source_dir / "METHODS.toml").write_text("[package]\n") + (source_dir / "data.mthds").write_text("bundle content\n") + + address = "github.com/org/repo" + version = "1.0.0" + cached_path = store_in_cache(source_dir, address, version, cache_root=cache_root) + + expected_hash = compute_directory_hash(cached_path) + locked = LockedPackage( + version=version, + hash=expected_hash, + source=f"https://{address}", + ) + + # Should not raise + verify_locked_package(locked, address, cache_root=cache_root) + + def test_verify_locked_package_mismatch(self, tmp_path: Path): + """Modified content raises IntegrityError.""" + cache_root = tmp_path / "cache" + source_dir = tmp_path / "source" + source_dir.mkdir() + (source_dir / "METHODS.toml").write_text("[package]\n") + + address = "github.com/org/repo" + version = "1.0.0" + cached_path = store_in_cache(source_dir, address, version, cache_root=cache_root) + + # Record a fake hash + locked = LockedPackage( + version=version, + hash="sha256:0000000000000000000000000000000000000000000000000000000000000000", + source=f"https://{address}", + ) + + # Cached content doesn't match the fake hash + assert compute_directory_hash(cached_path) != locked.hash + with pytest.raises(IntegrityError, match="Integrity check failed"): + verify_locked_package(locked, address, cache_root=cache_root) + + def test_verify_locked_package_not_cached(self, tmp_path: Path): + """Missing cache directory raises IntegrityError.""" + locked = LockedPackage( + version="1.0.0", + hash="sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + source="https://github.com/org/missing", + ) + with pytest.raises(IntegrityError, match="not found"): + verify_locked_package(locked, "github.com/org/missing", cache_root=tmp_path) + + # ---------------------------------------------------------------- + # Lock file generation + # ---------------------------------------------------------------- + + def test_generate_lock_file_remote_only(self, tmp_path: Path): + """1 local + 1 remote dep: only the remote appears in the lock file.""" + # Set up a cached remote package + remote_dir = tmp_path / "remote_src" + remote_dir.mkdir() + (remote_dir / "METHODS.toml").write_text("[package]\n") + (remote_dir / "main.mthds").write_text("content\n") + + manifest = MthdsPackageManifest( + address="github.com/org/consumer", + version="1.0.0", + description="Consumer package", + dependencies=[ + PackageDependency( + alias="local_dep", + address="github.com/org/local", + version="1.0.0", + path="../local", + ), + PackageDependency( + alias="remote_dep", + address="github.com/org/remote", + version="2.0.0", + ), + ], + ) + + remote_manifest = MthdsPackageManifest( + address="github.com/org/remote", + version="2.0.0", + description="Remote package", + ) + + resolved_deps = [ + ResolvedDependency( + alias="local_dep", + address="github.com/org/local", + manifest=None, + package_root=tmp_path / "local", + mthds_files=[], + exported_pipe_codes=None, + ), + ResolvedDependency( + alias="remote_dep", + address="github.com/org/remote", + manifest=remote_manifest, + package_root=remote_dir, + mthds_files=[], + exported_pipe_codes=None, + ), + ] + + lock = generate_lock_file(manifest, resolved_deps) + + assert len(lock.packages) == 1 + assert "github.com/org/remote" in lock.packages + assert 
lock.packages["github.com/org/remote"].version == "2.0.0" + assert lock.packages["github.com/org/remote"].source == "https://github.com/org/remote" + assert lock.packages["github.com/org/remote"].hash.startswith("sha256:") + + def test_generate_lock_file_empty_no_remote(self, tmp_path: Path): + """Only local deps produce an empty lock file.""" + manifest = MthdsPackageManifest( + address="github.com/org/consumer", + version="1.0.0", + description="Consumer with only local deps", + dependencies=[ + PackageDependency( + alias="local_only", + address="github.com/org/local", + version="1.0.0", + path="../local", + ), + ], + ) + + local_dir = tmp_path / "local" + local_dir.mkdir() + + resolved_deps = [ + ResolvedDependency( + alias="local_only", + address="github.com/org/local", + manifest=None, + package_root=local_dir, + mthds_files=[], + exported_pipe_codes=None, + ), + ] + + lock = generate_lock_file(manifest, resolved_deps) + assert lock.packages == {} + + # ---------------------------------------------------------------- + # Model frozen + # ---------------------------------------------------------------- + + def test_locked_package_model_frozen(self): + """Mutation attempt raises an error on the frozen model.""" + locked = LockedPackage( + version="1.0.0", + hash="sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + source="https://github.com/org/repo", + ) + with pytest.raises(ValidationError): + locked.version = "2.0.0" # type: ignore[misc] diff --git a/tests/unit/pipelex/core/packages/test_manifest.py b/tests/unit/pipelex/core/packages/test_manifest.py new file mode 100644 index 000000000..a7cb7128f --- /dev/null +++ b/tests/unit/pipelex/core/packages/test_manifest.py @@ -0,0 +1,468 @@ +import pytest +from pydantic import ValidationError + +from pipelex.core.packages.manifest import DomainExports, MthdsPackageManifest, PackageDependency + + +class TestMthdsPackageManifest: + """Tests for manifest model validation.""" + + def test_valid_full_manifest(self): + """Valid manifest with all fields populated.""" + manifest = MthdsPackageManifest( + address="github.com/pipelexlab/legal-tools", + version="1.0.0", + description="Legal analysis", + authors=["Alice", "Bob"], + license="MIT", + mthds_version="0.5.0", + dependencies=[ + PackageDependency(address="github.com/org/dep", version="2.0.0", alias="my_dep"), + ], + exports=[ + DomainExports(domain_path="legal.contracts", pipes=["extract_clause"]), + ], + ) + assert manifest.address == "github.com/pipelexlab/legal-tools" + assert manifest.version == "1.0.0" + assert len(manifest.dependencies) == 1 + assert manifest.dependencies[0].alias == "my_dep" + assert len(manifest.exports) == 1 + assert manifest.exports[0].domain_path == "legal.contracts" + + def test_valid_minimal_manifest(self): + """Minimal manifest with only required fields.""" + manifest = MthdsPackageManifest( + address="github.com/org/pkg", + version="0.1.0", + description="Minimal test package", + ) + assert manifest.address == "github.com/org/pkg" + assert manifest.version == "0.1.0" + assert manifest.description == "Minimal test package" + assert manifest.authors == [] + assert manifest.dependencies == [] + assert manifest.exports == [] + + def test_missing_description_fails(self): + """Missing description should fail validation.""" + with pytest.raises(ValidationError): + MthdsPackageManifest( + address="github.com/org/repo", + version="1.0.0", + ) # type: ignore[call-arg] + + def test_empty_description_fails(self): + """Empty description should fail 
validation.""" + with pytest.raises(ValidationError, match="must not be empty"): + MthdsPackageManifest( + address="github.com/org/repo", + version="1.0.0", + description=" ", + ) + + def test_invalid_address_no_hostname(self): + """Address without hostname pattern should fail.""" + with pytest.raises(ValidationError, match="Invalid package address"): + MthdsPackageManifest( + address="no-dots-or-slashes", + version="1.0.0", + description="Test", + ) + + def test_invalid_address_no_slash(self): + """Address with dots but no slash should fail.""" + with pytest.raises(ValidationError, match="Invalid package address"): + MthdsPackageManifest( + address="github.com", + version="1.0.0", + description="Test", + ) + + def test_invalid_version_not_semver(self): + """Non-semver version should fail.""" + with pytest.raises(ValidationError, match="Invalid version"): + MthdsPackageManifest( + address="github.com/org/repo", + version="not-a-version", + description="Test", + ) + + def test_invalid_version_partial(self): + """Partial semver should fail.""" + with pytest.raises(ValidationError, match="Invalid version"): + MthdsPackageManifest( + address="github.com/org/repo", + version="1.0", + description="Test", + ) + + def test_valid_semver_with_prerelease(self): + """Semver with prerelease tag should pass.""" + manifest = MthdsPackageManifest( + address="github.com/org/repo", + version="1.0.0-beta.1", + description="Test", + ) + assert manifest.version == "1.0.0-beta.1" + + def test_duplicate_dependency_aliases(self): + """Duplicate aliases should fail validation.""" + with pytest.raises(ValidationError, match="Duplicate dependency alias"): + MthdsPackageManifest( + address="github.com/org/repo", + version="1.0.0", + description="Test", + dependencies=[ + PackageDependency(address="github.com/org/dep1", version="1.0.0", alias="same_alias"), + PackageDependency(address="github.com/org/dep2", version="2.0.0", alias="same_alias"), + ], + ) + + def test_invalid_dependency_alias_not_snake_case(self): + """Dependency alias that is not snake_case should fail.""" + with pytest.raises(ValidationError, match="Invalid dependency alias"): + PackageDependency( + address="github.com/org/dep", + version="1.0.0", + alias="NotSnakeCase", + ) + + @pytest.mark.parametrize( + "reserved_domain", + ["native", "mthds", "pipelex"], + ) + def test_reserved_domain_exact_in_exports_rejected(self, reserved_domain: str): + """Exact reserved domain names in exports should be rejected.""" + with pytest.raises(ValidationError, match="reserved domain"): + DomainExports( + domain_path=reserved_domain, + pipes=["some_pipe"], + ) + + @pytest.mark.parametrize( + "reserved_domain_path", + ["native.concepts", "mthds.core", "pipelex.internal"], + ) + def test_reserved_domain_prefix_in_exports_rejected(self, reserved_domain_path: str): + """Hierarchical paths starting with a reserved domain should be rejected.""" + with pytest.raises(ValidationError, match="reserved domain"): + DomainExports( + domain_path=reserved_domain_path, + pipes=["some_pipe"], + ) + + @pytest.mark.parametrize( + "safe_domain", + ["legal", "my_native_utils", "pipeline", "scoring"], + ) + def test_non_reserved_domain_accepted(self, safe_domain: str): + """Domain names that are not reserved should pass validation.""" + export = DomainExports( + domain_path=safe_domain, + pipes=["some_pipe"], + ) + assert export.domain_path == safe_domain + + def test_invalid_domain_path_in_exports(self): + """Invalid domain path in exports should fail.""" + with 
pytest.raises(ValidationError, match="Invalid domain path"): + DomainExports( + domain_path="InvalidDomain", + pipes=["my_pipe"], + ) + + def test_invalid_pipe_name_in_exports(self): + """Invalid pipe name in exports should fail.""" + with pytest.raises(ValidationError, match="Invalid pipe name"): + DomainExports( + domain_path="valid_domain", + pipes=["InvalidPipeName"], + ) + + def test_valid_hierarchical_domain_in_exports(self): + """Hierarchical domain path in exports should pass.""" + export = DomainExports( + domain_path="legal.contracts.shareholder", + pipes=["extract_clause"], + ) + assert export.domain_path == "legal.contracts.shareholder" + + def test_empty_dependencies_and_exports(self): + """Empty lists for dependencies and exports should pass.""" + manifest = MthdsPackageManifest( + address="github.com/org/repo", + version="1.0.0", + description="Test", + dependencies=[], + exports=[], + ) + assert manifest.dependencies == [] + assert manifest.exports == [] + + @pytest.mark.parametrize( + "version_str", + [ + "^1.0.0", + "~1.0.0", + ">=1.0.0", + "<=2.0.0", + ">1.0.0", + "<2.0.0", + "==1.0.0", + "!=1.0.0", + ">=1.0.0, <2.0.0", + "*", + "1.*", + "1.0.*", + "1.0.0", + "2.1.3-beta.1", + ], + ) + def test_valid_dependency_version_constraints(self, version_str: str): + """Version constraints using Poetry/uv range syntax should pass.""" + dep = PackageDependency( + address="github.com/org/dep", + version=version_str, + alias="my_dep", + ) + assert dep.version == version_str + + @pytest.mark.parametrize( + "version_str", + [ + "not-a-version", + "abc", + "1.0.0.0", + ">>1.0.0", + "~=1.0.0", + ], + ) + def test_invalid_dependency_version_constraints(self, version_str: str): + """Invalid version constraint strings should fail.""" + with pytest.raises(ValidationError, match="Invalid version constraint"): + PackageDependency( + address="github.com/org/dep", + version=version_str, + alias="my_dep", + ) + + @pytest.mark.parametrize( + "mthds_version", + ["1.0.0", "^1.0.0", "~1.0.0", ">=1.0.0", "*"], + ) + def test_valid_mthds_version_constraints(self, mthds_version: str): + """Valid mthds_version constraints should pass validation.""" + manifest = MthdsPackageManifest( + address="github.com/org/repo", + version="1.0.0", + description="Test", + mthds_version=mthds_version, + ) + assert manifest.mthds_version == mthds_version + + @pytest.mark.parametrize( + "mthds_version", + ["not-a-version", "abc", ">>1.0.0"], + ) + def test_invalid_mthds_version_constraints(self, mthds_version: str): + """Invalid mthds_version constraints should fail validation.""" + with pytest.raises(ValidationError, match="Invalid mthds_version constraint"): + MthdsPackageManifest( + address="github.com/org/repo", + version="1.0.0", + description="Test", + mthds_version=mthds_version, + ) + + def test_none_mthds_version_accepted(self): + """mthds_version=None should pass validation.""" + manifest = MthdsPackageManifest( + address="github.com/org/repo", + version="1.0.0", + description="Test", + mthds_version=None, + ) + assert manifest.mthds_version is None + + # --- Authors validation --- + + def test_empty_author_string_fails(self): + """An empty string in authors should fail validation.""" + with pytest.raises(ValidationError, match="must not be empty or whitespace"): + MthdsPackageManifest( + address="github.com/org/repo", + version="1.0.0", + description="Test", + authors=[""], + ) + + def test_whitespace_author_string_fails(self): + """A whitespace-only string in authors should fail validation.""" + with 
pytest.raises(ValidationError, match="must not be empty or whitespace"): + MthdsPackageManifest( + address="github.com/org/repo", + version="1.0.0", + description="Test", + authors=[" "], + ) + + def test_mixed_valid_and_empty_author_fails(self): + """A mix of valid and empty authors should fail validation.""" + with pytest.raises(ValidationError, match="Author at index 1"): + MthdsPackageManifest( + address="github.com/org/repo", + version="1.0.0", + description="Test", + authors=["Alice", ""], + ) + + # --- License validation --- + + def test_empty_license_string_fails(self): + """An empty license string should fail validation.""" + with pytest.raises(ValidationError, match="must not be empty or whitespace"): + MthdsPackageManifest( + address="github.com/org/repo", + version="1.0.0", + description="Test", + license="", + ) + + def test_whitespace_license_string_fails(self): + """A whitespace-only license string should fail validation.""" + with pytest.raises(ValidationError, match="must not be empty or whitespace"): + MthdsPackageManifest( + address="github.com/org/repo", + version="1.0.0", + description="Test", + license=" ", + ) + + # --- extra="forbid" tests --- + + def test_manifest_rejects_unknown_fields(self): + """Unknown fields on MthdsPackageManifest should be rejected by extra='forbid'.""" + with pytest.raises(ValidationError, match="extra_forbidden"): + MthdsPackageManifest( + address="github.com/org/repo", + version="1.0.0", + description="Test", + unknown_field="x", # type: ignore[call-arg] + ) + + def test_dependency_rejects_unknown_fields(self): + """Unknown fields on PackageDependency should be rejected by extra='forbid'.""" + with pytest.raises(ValidationError, match="extra_forbidden"): + PackageDependency( + address="github.com/org/dep", + version="1.0.0", + alias="my_dep", + unknown_field="x", # type: ignore[call-arg] + ) + + def test_domain_exports_rejects_unknown_fields(self): + """Unknown fields on DomainExports should be rejected by extra='forbid'.""" + with pytest.raises(ValidationError, match="extra_forbidden"): + DomainExports( + domain_path="legal", + pipes=["my_pipe"], + unknown_field="x", # type: ignore[call-arg] + ) + + # --- Description whitespace variants --- + + @pytest.mark.parametrize( + "whitespace_description", + ["\t", "\n", " \t\n "], + ) + def test_whitespace_only_description_fails(self, whitespace_description: str): + """Various whitespace-only descriptions should fail validation.""" + with pytest.raises(ValidationError, match="must not be empty"): + MthdsPackageManifest( + address="github.com/org/repo", + version="1.0.0", + description=whitespace_description, + ) + + # --- Display name validation --- + + def test_valid_display_name(self): + """A valid display_name should be stored.""" + manifest = MthdsPackageManifest( + address="github.com/org/repo", + version="1.0.0", + description="Test", + display_name="Legal Tools", + ) + assert manifest.display_name == "Legal Tools" + + def test_display_name_with_emoji(self): + """Emoji characters in display_name should pass.""" + manifest = MthdsPackageManifest( + address="github.com/org/repo", + version="1.0.0", + description="Test", + display_name="\U0001f680 Legal Tools", + ) + assert manifest.display_name == "\U0001f680 Legal Tools" + + def test_none_display_name_accepted(self): + """display_name=None should pass validation (default).""" + manifest = MthdsPackageManifest( + address="github.com/org/repo", + version="1.0.0", + description="Test", + display_name=None, + ) + assert manifest.display_name is 
None + + def test_empty_display_name_fails(self): + """Empty display_name should fail validation.""" + with pytest.raises(ValidationError, match="must not be empty or whitespace"): + MthdsPackageManifest( + address="github.com/org/repo", + version="1.0.0", + description="Test", + display_name="", + ) + + def test_whitespace_display_name_fails(self): + """Whitespace-only display_name should fail validation.""" + with pytest.raises(ValidationError, match="must not be empty or whitespace"): + MthdsPackageManifest( + address="github.com/org/repo", + version="1.0.0", + description="Test", + display_name=" ", + ) + + def test_display_name_too_long_fails(self): + """display_name exceeding 128 characters should fail validation.""" + with pytest.raises(ValidationError, match="must not exceed 128 characters"): + MthdsPackageManifest( + address="github.com/org/repo", + version="1.0.0", + description="Test", + display_name="x" * 129, + ) + + def test_display_name_with_control_chars_fails(self): + """display_name containing control characters should fail validation.""" + with pytest.raises(ValidationError, match="must not contain control characters"): + MthdsPackageManifest( + address="github.com/org/repo", + version="1.0.0", + description="Test", + display_name="Legal\x00Tools", + ) + + def test_display_name_strips_whitespace(self): + """display_name with leading/trailing whitespace should be stripped.""" + manifest = MthdsPackageManifest( + address="github.com/org/repo", + version="1.0.0", + description="Test", + display_name=" Legal Tools ", + ) + assert manifest.display_name == "Legal Tools" diff --git a/tests/unit/pipelex/core/packages/test_manifest_parser.py b/tests/unit/pipelex/core/packages/test_manifest_parser.py new file mode 100644 index 000000000..c1702def5 --- /dev/null +++ b/tests/unit/pipelex/core/packages/test_manifest_parser.py @@ -0,0 +1,140 @@ +import pytest + +from pipelex.core.packages.exceptions import ManifestParseError, ManifestValidationError +from pipelex.core.packages.manifest_parser import parse_methods_toml, serialize_manifest_to_toml +from tests.unit.pipelex.core.packages.test_data import ( + EMPTY_EXPORTS_DEPS_TOML, + FULL_MANIFEST_TOML, + INVALID_DOMAIN_PATH_EXPORTS_TOML, + INVALID_PIPE_NAME_EXPORTS_TOML, + INVALID_TOML_SYNTAX, + MINIMAL_MANIFEST_TOML, + MISSING_PACKAGE_SECTION_TOML, + MISSING_REQUIRED_FIELDS_TOML, + MULTI_LEVEL_EXPORTS_TOML, + NON_LIST_PIPES_EXPORTS_TOML, + NON_TABLE_DEPENDENCY_TOML, + RESERVED_DOMAIN_EXPORTS_TOML, + UNKNOWN_PACKAGE_KEYS_TOML, + ManifestTestData, +) + + +class TestManifestParser: + """Tests for METHODS.toml parsing and serialization.""" + + def test_parse_full_manifest(self): + """Parse a well-formed TOML with nested exports sub-tables.""" + manifest = parse_methods_toml(FULL_MANIFEST_TOML) + assert manifest.address == ManifestTestData.FULL_MANIFEST.address + assert manifest.version == ManifestTestData.FULL_MANIFEST.version + assert manifest.description == ManifestTestData.FULL_MANIFEST.description + assert manifest.authors == ManifestTestData.FULL_MANIFEST.authors + assert manifest.license == ManifestTestData.FULL_MANIFEST.license + assert manifest.display_name == ManifestTestData.FULL_MANIFEST.display_name + assert manifest.mthds_version == ManifestTestData.FULL_MANIFEST.mthds_version + assert len(manifest.dependencies) == 1 + assert manifest.dependencies[0].alias == "scoring_lib" + assert manifest.dependencies[0].address == "github.com/pipelexlab/scoring-lib" + assert len(manifest.exports) == 2 + domain_paths = {exp.domain_path for 
exp in manifest.exports} + assert "legal.contracts" in domain_paths + assert "scoring" in domain_paths + + def test_parse_minimal_manifest(self): + """Parse a manifest with only required fields.""" + manifest = parse_methods_toml(MINIMAL_MANIFEST_TOML) + assert manifest.address == ManifestTestData.MINIMAL_MANIFEST.address + assert manifest.version == ManifestTestData.MINIMAL_MANIFEST.version + assert manifest.display_name is None + assert manifest.dependencies == [] + assert manifest.exports == [] + + def test_parse_empty_exports_and_deps(self): + """Parse a manifest with empty exports and dependencies sections.""" + manifest = parse_methods_toml(EMPTY_EXPORTS_DEPS_TOML) + assert manifest.dependencies == [] + assert manifest.exports == [] + + def test_parse_multi_level_nested_exports(self): + """Parse manifest with multi-level nested exports like [exports.legal.contracts.shareholder].""" + manifest = parse_methods_toml(MULTI_LEVEL_EXPORTS_TOML) + domain_paths = {exp.domain_path for exp in manifest.exports} + assert "legal.contracts.shareholder" in domain_paths + assert "legal.contracts" in domain_paths + assert "scoring" in domain_paths + + # Check pipes for each domain + shareholder_exports = next(exp for exp in manifest.exports if exp.domain_path == "legal.contracts.shareholder") + assert shareholder_exports.pipes == ["extract_shareholder_clause"] + + contracts_exports = next(exp for exp in manifest.exports if exp.domain_path == "legal.contracts") + assert contracts_exports.pipes == ["extract_clause"] + + def test_parse_invalid_toml_syntax(self): + """TOML syntax error should raise ManifestParseError.""" + with pytest.raises(ManifestParseError, match="Invalid TOML syntax"): + parse_methods_toml(INVALID_TOML_SYNTAX) + + def test_parse_missing_package_section(self): + """Missing [package] section should raise ManifestValidationError.""" + with pytest.raises(ManifestValidationError, match="must contain a \\[package\\] section"): + parse_methods_toml(MISSING_PACKAGE_SECTION_TOML) + + def test_parse_missing_required_fields(self): + """Missing required fields in [package] should raise ManifestValidationError.""" + with pytest.raises(ManifestValidationError, match="validation failed"): + parse_methods_toml(MISSING_REQUIRED_FIELDS_TOML) + + def test_parse_non_table_dependency_raises(self): + """A dependency whose value is not a table should raise ManifestValidationError.""" + with pytest.raises(ManifestValidationError, match="expected a table"): + parse_methods_toml(NON_TABLE_DEPENDENCY_TOML) + + @pytest.mark.parametrize( + ("topic", "toml_content"), + [ + ("invalid domain path", INVALID_DOMAIN_PATH_EXPORTS_TOML), + ("invalid pipe name", INVALID_PIPE_NAME_EXPORTS_TOML), + ], + ) + def test_parse_invalid_exports_raises(self, topic: str, toml_content: str): + """Invalid domain paths or pipe names in [exports] should raise ManifestValidationError.""" + _ = topic # Used for test identification + with pytest.raises(ManifestValidationError, match="Invalid exports"): + parse_methods_toml(toml_content) + + def test_parse_non_list_pipes_raises(self): + """A non-list value for 'pipes' should raise ManifestValidationError, not be silently dropped.""" + with pytest.raises(ManifestValidationError, match="must be a list"): + parse_methods_toml(NON_LIST_PIPES_EXPORTS_TOML) + + def test_parse_reserved_domain_in_exports_raises(self): + """Reserved domain in [exports] should raise ManifestValidationError.""" + with pytest.raises(ManifestValidationError, match="Invalid exports"): + 
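+            # Hedged sketch of what RESERVED_DOMAIN_EXPORTS_TOML presumably
+            # declares (the real fixture lives in test_data.py):
+            #
+            #   [exports.native]
+            #   pipes = ["some_pipe"]
+            #
+            # "native", "mthds" and "pipelex" are the reserved domain names
+            # rejected by the DomainExports validator (see test_manifest.py).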
parse_methods_toml(RESERVED_DOMAIN_EXPORTS_TOML) + + def test_serialize_roundtrip(self): + """Serialize a manifest to TOML and parse it back — roundtrip check.""" + original = ManifestTestData.FULL_MANIFEST + toml_str = serialize_manifest_to_toml(original) + parsed = parse_methods_toml(toml_str) + assert parsed.address == original.address + assert parsed.version == original.version + assert parsed.description == original.description + assert len(parsed.dependencies) == len(original.dependencies) + assert len(parsed.exports) == len(original.exports) + + def test_serialize_minimal_manifest(self): + """Serialize a minimal manifest with no deps/exports.""" + manifest = ManifestTestData.MINIMAL_MANIFEST + toml_str = serialize_manifest_to_toml(manifest) + assert "[package]" in toml_str + assert 'address = "github.com/pipelexlab/minimal"' in toml_str + assert "[dependencies]" not in toml_str + assert "[exports" not in toml_str + + def test_parse_unknown_package_keys_raises(self): + """Unknown keys in [package] section should raise ManifestValidationError.""" + with pytest.raises(ManifestValidationError, match="Unknown keys in \\[package\\] section"): + parse_methods_toml(UNKNOWN_PACKAGE_KEYS_TOML) diff --git a/tests/unit/pipelex/core/packages/test_package_cache.py b/tests/unit/pipelex/core/packages/test_package_cache.py new file mode 100644 index 000000000..e56b1a854 --- /dev/null +++ b/tests/unit/pipelex/core/packages/test_package_cache.py @@ -0,0 +1,78 @@ +from pathlib import Path + +from pipelex.core.packages.package_cache import ( + get_cached_package_path, + is_cached, + remove_cached_package, + store_in_cache, +) + + +class TestPackageCache: + """Unit tests for package cache operations using tmp_path.""" + + def test_get_cached_package_path_structure(self, tmp_path: Path): + """Cache path follows {root}/{address}/{version}/ layout.""" + result = get_cached_package_path("github.com/org/repo", "1.0.0", cache_root=tmp_path) + assert result == tmp_path / "github.com/org/repo" / "1.0.0" + + def test_is_cached_false_when_empty(self, tmp_path: Path): + """Cache miss when directory does not exist.""" + assert is_cached("github.com/org/repo", "1.0.0", cache_root=tmp_path) is False + + def test_store_and_is_cached(self, tmp_path: Path): + """Round-trip: store then lookup returns True.""" + source_dir = tmp_path / "source" + source_dir.mkdir() + (source_dir / "METHODS.toml").write_text("[package]\n") + + result = store_in_cache(source_dir, "github.com/org/repo", "1.0.0", cache_root=tmp_path) + + assert result.is_dir() + assert is_cached("github.com/org/repo", "1.0.0", cache_root=tmp_path) is True + + def test_store_removes_dot_git(self, tmp_path: Path): + """.git/ directory is not present in the cached copy.""" + source_dir = tmp_path / "source" + source_dir.mkdir() + (source_dir / "METHODS.toml").write_text("[package]\n") + git_dir = source_dir / ".git" + git_dir.mkdir() + (git_dir / "HEAD").write_text("ref: refs/heads/main\n") + + result = store_in_cache(source_dir, "github.com/org/repo", "1.0.0", cache_root=tmp_path) + + assert not (result / ".git").exists() + + def test_store_preserves_package_content(self, tmp_path: Path): + """METHODS.toml and .mthds subdirectory content survive caching.""" + source_dir = tmp_path / "source" + source_dir.mkdir() + (source_dir / "METHODS.toml").write_text("[package]\naddress = 'test'\n") + mthds_dir = source_dir / ".mthds" + mthds_dir.mkdir() + (mthds_dir / "main.mthds").write_text("bundle content\n") + + result = store_in_cache(source_dir, "github.com/org/repo", 
"1.0.0", cache_root=tmp_path) + + assert (result / "METHODS.toml").is_file() + assert (result / ".mthds" / "main.mthds").is_file() + assert (result / "METHODS.toml").read_text() == "[package]\naddress = 'test'\n" + + def test_remove_cached_package(self, tmp_path: Path): + """Removing a cached package returns True and deletes the directory.""" + source_dir = tmp_path / "source" + source_dir.mkdir() + (source_dir / "data.txt").write_text("content") + + store_in_cache(source_dir, "github.com/org/repo", "1.0.0", cache_root=tmp_path) + assert is_cached("github.com/org/repo", "1.0.0", cache_root=tmp_path) is True + + removed = remove_cached_package("github.com/org/repo", "1.0.0", cache_root=tmp_path) + assert removed is True + assert is_cached("github.com/org/repo", "1.0.0", cache_root=tmp_path) is False + + def test_remove_not_cached_returns_false(self, tmp_path: Path): + """Removing a non-existent cache entry returns False.""" + removed = remove_cached_package("github.com/org/missing", "9.9.9", cache_root=tmp_path) + assert removed is False diff --git a/tests/unit/pipelex/core/packages/test_publish_validation.py b/tests/unit/pipelex/core/packages/test_publish_validation.py new file mode 100644 index 000000000..0494e9d15 --- /dev/null +++ b/tests/unit/pipelex/core/packages/test_publish_validation.py @@ -0,0 +1,432 @@ +import shutil +import subprocess # noqa: S404 +import textwrap +from pathlib import Path +from unittest.mock import MagicMock + +from pytest_mock import MockerFixture + +from pipelex.core.packages.discovery import MANIFEST_FILENAME +from pipelex.core.packages.publish_validation import ( + IssueCategory, + IssueLevel, + PublishValidationIssue, + PublishValidationResult, + validate_for_publish, +) + +PACKAGES_DATA_DIR = Path(__file__).resolve().parent.parent.parent.parent.parent / "data" / "packages" + + +def _issues_by_category(result: PublishValidationResult, category: IssueCategory) -> list[PublishValidationIssue]: + return [issue for issue in result.issues if issue.category == category] + + +def _issues_by_level_warning(result: PublishValidationResult) -> list[PublishValidationIssue]: + return [issue for issue in result.issues if issue.level.is_warning] + + +class TestPublishValidation: + """Tests for publish validation logic.""" + + def test_issue_level_properties(self) -> None: + """IssueLevel.is_error and is_warning are mutually exclusive and exhaustive.""" + assert IssueLevel.ERROR.is_error is True + assert IssueLevel.ERROR.is_warning is False + assert IssueLevel.WARNING.is_error is False + assert IssueLevel.WARNING.is_warning is True + + def test_valid_package_passes(self, tmp_path: Path) -> None: + """legal_tools with full manifest, bundles, and exports -> is_publishable=True (git checks off).""" + src_dir = PACKAGES_DATA_DIR / "legal_tools" + pkg_dir = tmp_path / "legal_tools" + shutil.copytree(src_dir, pkg_dir) + + result = validate_for_publish(pkg_dir, check_git=False) + + # legal_tools has a remote dep but no lock file, so there will be a lock file error + # Filter out lock file issues for this test — the package is otherwise valid + non_lock_errors = [issue for issue in result.issues if issue.level.is_error and issue.category != IssueCategory.LOCK_FILE] + assert not non_lock_errors, f"Unexpected errors: {non_lock_errors}" + + def test_no_manifest_errors(self, tmp_path: Path) -> None: + """Empty directory -> manifest ERROR.""" + result = validate_for_publish(tmp_path, check_git=False) + + assert not result.is_publishable + manifest_errors = _issues_by_category(result, 
IssueCategory.MANIFEST) + assert len(manifest_errors) == 1 + assert manifest_errors[0].level.is_error + assert MANIFEST_FILENAME in manifest_errors[0].message + + def test_no_bundles_errors(self, tmp_path: Path) -> None: + """Manifest but no .mthds files -> bundle ERROR.""" + manifest_content = textwrap.dedent("""\ + [package] + address = "github.com/test/no-bundles" + version = "1.0.0" + description = "No bundles" + authors = ["Test"] + license = "MIT" + """) + (tmp_path / MANIFEST_FILENAME).write_text(manifest_content, encoding="utf-8") + + result = validate_for_publish(tmp_path, check_git=False) + + assert not result.is_publishable + bundle_errors = _issues_by_category(result, IssueCategory.BUNDLE) + assert len(bundle_errors) == 1 + assert bundle_errors[0].level.is_error + assert ".mthds" in bundle_errors[0].message + + def test_missing_authors_warns(self, tmp_path: Path) -> None: + """minimal_package has no authors -> WARNING.""" + src_dir = PACKAGES_DATA_DIR / "minimal_package" + pkg_dir = tmp_path / "minimal_package" + shutil.copytree(src_dir, pkg_dir) + + result = validate_for_publish(pkg_dir, check_git=False) + + warnings = _issues_by_level_warning(result) + author_warnings = [warning for warning in warnings if "authors" in warning.message.lower()] + assert len(author_warnings) == 1 + + def test_missing_license_warns(self, tmp_path: Path) -> None: + """minimal_package has no license -> WARNING.""" + src_dir = PACKAGES_DATA_DIR / "minimal_package" + pkg_dir = tmp_path / "minimal_package" + shutil.copytree(src_dir, pkg_dir) + + result = validate_for_publish(pkg_dir, check_git=False) + + warnings = _issues_by_level_warning(result) + license_warnings = [warning for warning in warnings if "license" in warning.message.lower()] + assert len(license_warnings) == 1 + + def test_phantom_export_errors(self, tmp_path: Path) -> None: + """Package with export listing a non-existent pipe -> EXPORT ERROR.""" + src_dir = PACKAGES_DATA_DIR / "minimal_package" + pkg_dir = tmp_path / "phantom_export" + shutil.copytree(src_dir, pkg_dir) + + # Rewrite manifest to add an export for a pipe that doesn't exist + manifest_content = textwrap.dedent("""\ + [package] + address = "github.com/test/phantom" + version = "1.0.0" + description = "Phantom export test" + authors = ["Test"] + license = "MIT" + + [exports.pkg_test_minimal_core] + pipes = ["pkg_test_hello", "pkg_test_nonexistent_pipe"] + """) + (pkg_dir / MANIFEST_FILENAME).write_text(manifest_content, encoding="utf-8") + + result = validate_for_publish(pkg_dir, check_git=False) + + export_errors = _issues_by_category(result, IssueCategory.EXPORT) + assert len(export_errors) == 1 + assert export_errors[0].level.is_error + assert "pkg_test_nonexistent_pipe" in export_errors[0].message + + def test_lock_file_missing_with_remote_deps_errors(self, tmp_path: Path) -> None: + """Manifest with remote dep but no methods.lock -> LOCK_FILE ERROR.""" + src_dir = PACKAGES_DATA_DIR / "minimal_package" + pkg_dir = tmp_path / "missing_lock" + shutil.copytree(src_dir, pkg_dir) + + # Rewrite manifest to add a remote dependency + manifest_content = textwrap.dedent("""\ + [package] + address = "github.com/test/missing-lock" + version = "1.0.0" + description = "Missing lock test" + authors = ["Test"] + license = "MIT" + + [dependencies] + some_lib = { address = "github.com/test/some-lib", version = "1.0.0" } + """) + (pkg_dir / MANIFEST_FILENAME).write_text(manifest_content, encoding="utf-8") + + result = validate_for_publish(pkg_dir, check_git=False) + + lock_errors = 
_issues_by_category(result, IssueCategory.LOCK_FILE) + assert len(lock_errors) == 1 + assert lock_errors[0].level.is_error + assert "methods.lock" in lock_errors[0].message + + def test_lock_file_not_required_without_remote_deps(self, tmp_path: Path) -> None: + """Local-only deps -> no lock file error.""" + src_dir = PACKAGES_DATA_DIR / "minimal_package" + pkg_dir = tmp_path / "local_only" + shutil.copytree(src_dir, pkg_dir) + + # Rewrite manifest with a local path dependency + manifest_content = textwrap.dedent("""\ + [package] + address = "github.com/test/local-only" + version = "1.0.0" + description = "Local only test" + authors = ["Test"] + license = "MIT" + + [dependencies] + local_lib = { address = "github.com/test/local-lib", version = "1.0.0", path = "../local-lib" } + """) + (pkg_dir / MANIFEST_FILENAME).write_text(manifest_content, encoding="utf-8") + + result = validate_for_publish(pkg_dir, check_git=False) + + lock_errors = _issues_by_category(result, IssueCategory.LOCK_FILE) + assert not lock_errors + + def test_wildcard_version_warns(self, tmp_path: Path) -> None: + """Dependency with version * -> DEPENDENCY WARNING.""" + src_dir = PACKAGES_DATA_DIR / "minimal_package" + pkg_dir = tmp_path / "wildcard_dep" + shutil.copytree(src_dir, pkg_dir) + + manifest_content = textwrap.dedent("""\ + [package] + address = "github.com/test/wildcard" + version = "1.0.0" + description = "Wildcard dep test" + authors = ["Test"] + license = "MIT" + + [dependencies] + some_lib = { address = "github.com/test/some-lib", version = "*" } + """) + (pkg_dir / MANIFEST_FILENAME).write_text(manifest_content, encoding="utf-8") + + result = validate_for_publish(pkg_dir, check_git=False) + + dep_warnings = _issues_by_category(result, IssueCategory.DEPENDENCY) + assert len(dep_warnings) == 1 + assert dep_warnings[0].level.is_warning + assert "wildcard" in dep_warnings[0].message.lower() + + def test_git_checks_skipped_when_disabled(self, tmp_path: Path) -> None: + """check_git=False -> no GIT issues regardless of git state.""" + src_dir = PACKAGES_DATA_DIR / "minimal_package" + pkg_dir = tmp_path / "no_git" + shutil.copytree(src_dir, pkg_dir) + + result = validate_for_publish(pkg_dir, check_git=False) + + git_issues = _issues_by_category(result, IssueCategory.GIT) + assert not git_issues + + def test_result_includes_package_version_on_success(self, tmp_path: Path) -> None: + """Successful validation populates package_version from the parsed manifest.""" + src_dir = PACKAGES_DATA_DIR / "minimal_package" + pkg_dir = tmp_path / "version_check" + shutil.copytree(src_dir, pkg_dir) + + result = validate_for_publish(pkg_dir, check_git=False) + + assert result.package_version is not None + assert result.package_version == "0.1.0" + + def test_result_has_no_package_version_when_manifest_missing(self, tmp_path: Path) -> None: + """Missing manifest -> package_version is None.""" + result = validate_for_publish(tmp_path, check_git=False) + + assert result.package_version is None + + def test_manifest_field_checks_produce_no_errors(self, tmp_path: Path) -> None: + """Manifest field checks only produce warnings (authors/license), never errors. + + Address, version, and description are validated by Pydantic validators + during parsing. If the manifest parsed successfully, those fields are + guaranteed valid — the field checker should not re-check them. 
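+
+        Concretely: because minimal_package parses at all, its address,
+        version and description are already known-good, so the only
+        field-level signals left are the optional-metadata warnings
+        (authors, license) asserted below.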
+ """ + src_dir = PACKAGES_DATA_DIR / "minimal_package" + pkg_dir = tmp_path / "manifest_fields" + shutil.copytree(src_dir, pkg_dir) + + result = validate_for_publish(pkg_dir, check_git=False) + + manifest_issues = _issues_by_category(result, IssueCategory.MANIFEST) + manifest_errors = [issue for issue in manifest_issues if issue.level.is_error] + assert not manifest_errors, f"Expected no MANIFEST errors, got: {manifest_errors}" + # minimal_package has no authors and no license -> exactly 2 warnings + manifest_warnings = [issue for issue in manifest_issues if issue.level.is_warning] + assert len(manifest_warnings) == 2 + warning_messages = {issue.message for issue in manifest_warnings} + assert any("authors" in msg.lower() for msg in warning_messages) + assert any("license" in msg.lower() for msg in warning_messages) + + def test_reserved_domain_in_bundle_errors(self, tmp_path: Path) -> None: + """Bundle with a reserved domain should produce a VISIBILITY ERROR mentioning 'reserved'.""" + # Write a valid manifest without reserved domains in exports + manifest_content = textwrap.dedent("""\ + [package] + address = "github.com/test/reserved-bundle" + version = "1.0.0" + description = "Reserved domain test" + authors = ["Test"] + license = "MIT" + """) + (tmp_path / MANIFEST_FILENAME).write_text(manifest_content, encoding="utf-8") + + # Write a .mthds bundle file that declares a reserved domain + bundle_content = textwrap.dedent("""\ + domain = "native" + + [pipe.some_pipe] + type = "PipeLLM" + description = "A test pipe" + output = "Text" + prompt = "Hello" + """) + (tmp_path / "reserved.mthds").write_text(bundle_content, encoding="utf-8") + + result = validate_for_publish(tmp_path, check_git=False) + + visibility_errors = _issues_by_category(result, IssueCategory.VISIBILITY) + reserved_errors = [issue for issue in visibility_errors if "reserved" in issue.message.lower()] + assert len(reserved_errors) == 1 + assert reserved_errors[0].level == IssueLevel.ERROR + + def test_reserved_domain_not_reported_twice(self, tmp_path: Path) -> None: + """Reserved domain violation must appear exactly once, not duplicated across categories.""" + manifest_content = textwrap.dedent("""\ + [package] + address = "github.com/test/reserved-dup" + version = "1.0.0" + description = "Dedup test" + authors = ["Test"] + license = "MIT" + """) + (tmp_path / MANIFEST_FILENAME).write_text(manifest_content, encoding="utf-8") + + bundle_content = textwrap.dedent("""\ + domain = "native" + + [pipe.some_pipe] + type = "PipeLLM" + description = "A test pipe" + output = "Text" + prompt = "Hello" + """) + (tmp_path / "reserved.mthds").write_text(bundle_content, encoding="utf-8") + + result = validate_for_publish(tmp_path, check_git=False) + + all_reserved = [issue for issue in result.issues if "reserved" in issue.message.lower()] + assert len(all_reserved) == 1, ( + f"Expected exactly 1 reserved-domain issue, got {len(all_reserved)} across categories: " + f"{[(issue.category, issue.message) for issue in all_reserved]}" + ) + + def test_valid_mthds_version_no_publish_errors(self, tmp_path: Path) -> None: + """Manifest with valid mthds_version should produce no mthds_version MANIFEST errors.""" + src_dir = PACKAGES_DATA_DIR / "minimal_package" + pkg_dir = tmp_path / "valid_mthds_ver" + shutil.copytree(src_dir, pkg_dir) + + manifest_content = textwrap.dedent("""\ + [package] + address = "github.com/test/valid-mthds" + version = "1.0.0" + description = "Valid mthds_version test" + authors = ["Test"] + license = "MIT" + 
mthds_version = "^1.0.0" + """) + (pkg_dir / MANIFEST_FILENAME).write_text(manifest_content, encoding="utf-8") + + result = validate_for_publish(pkg_dir, check_git=False) + + manifest_errors = _issues_by_category(result, IssueCategory.MANIFEST) + mthds_version_errors = [issue for issue in manifest_errors if "mthds_version" in issue.message] + assert not mthds_version_errors + + def test_absent_mthds_version_no_publish_errors(self, tmp_path: Path) -> None: + """Manifest without mthds_version should produce no mthds_version MANIFEST errors.""" + src_dir = PACKAGES_DATA_DIR / "minimal_package" + pkg_dir = tmp_path / "no_mthds_ver" + shutil.copytree(src_dir, pkg_dir) + + result = validate_for_publish(pkg_dir, check_git=False) + + manifest_errors = _issues_by_category(result, IssueCategory.MANIFEST) + mthds_version_errors = [issue for issue in manifest_errors if "mthds_version" in issue.message] + assert not mthds_version_errors + + def test_unsatisfied_mthds_version_produces_warning(self, tmp_path: Path) -> None: + """Manifest with mthds_version targeting a future version should produce a WARNING.""" + src_dir = PACKAGES_DATA_DIR / "minimal_package" + pkg_dir = tmp_path / "future_mthds_ver" + shutil.copytree(src_dir, pkg_dir) + + manifest_content = textwrap.dedent("""\ + [package] + address = "github.com/test/future-mthds" + version = "1.0.0" + description = "Future mthds_version test" + authors = ["Test"] + license = "MIT" + mthds_version = ">=99.0.0" + """) + (pkg_dir / MANIFEST_FILENAME).write_text(manifest_content, encoding="utf-8") + + result = validate_for_publish(pkg_dir, check_git=False) + + manifest_issues = _issues_by_category(result, IssueCategory.MANIFEST) + satisfiability_warnings = [issue for issue in manifest_issues if issue.level == IssueLevel.WARNING and "not satisfied" in issue.message] + assert len(satisfiability_warnings) == 1 + assert "99.0.0" in satisfiability_warnings[0].message + + def test_satisfied_mthds_version_no_warning(self, tmp_path: Path) -> None: + """Manifest with mthds_version satisfied by current version should produce no warning.""" + src_dir = PACKAGES_DATA_DIR / "minimal_package" + pkg_dir = tmp_path / "satisfied_mthds_ver" + shutil.copytree(src_dir, pkg_dir) + + manifest_content = textwrap.dedent("""\ + [package] + address = "github.com/test/satisfied-mthds" + version = "1.0.0" + description = "Satisfied mthds_version test" + authors = ["Test"] + license = "MIT" + mthds_version = ">=1.0.0" + """) + (pkg_dir / MANIFEST_FILENAME).write_text(manifest_content, encoding="utf-8") + + result = validate_for_publish(pkg_dir, check_git=False) + + manifest_issues = _issues_by_category(result, IssueCategory.MANIFEST) + satisfiability_warnings = [issue for issue in manifest_issues if issue.level == IssueLevel.WARNING and "not satisfied" in issue.message] + assert not satisfiability_warnings + + def test_git_tag_check_failure_emits_warning(self, tmp_path: Path, mocker: MockerFixture) -> None: + """When git status succeeds but git tag -l fails, a GIT warning is emitted.""" + src_dir = PACKAGES_DATA_DIR / "minimal_package" + pkg_dir = tmp_path / "tag_fail" + shutil.copytree(src_dir, pkg_dir) + + # git status --porcelain succeeds with clean output, git tag -l raises + status_result = MagicMock() + status_result.stdout = "" + + def side_effect_run(cmd: list[str], **kwargs: object) -> MagicMock: # noqa: ARG001 + if "status" in cmd: + return status_result + if "tag" in cmd: + raise subprocess.CalledProcessError(128, "git tag -l") + return MagicMock(stdout="") + + 
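+        # side_effect_run dispatches on the git subcommand: "status" returns
+        # a clean-tree result, while "tag" simulates `git tag -l` failing
+        # with exit code 128. Patching subprocess.run at the module path
+        # where publish_validation imported it keeps the fake scoped to the
+        # code under test rather than to subprocess globally.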
mocker.patch("pipelex.core.packages.publish_validation.subprocess.run", side_effect=side_effect_run) + + result = validate_for_publish(pkg_dir, check_git=True) + + git_issues = _issues_by_category(result, IssueCategory.GIT) + assert len(git_issues) >= 1 + tag_warnings = [issue for issue in git_issues if "tag" in issue.message.lower()] + assert len(tag_warnings) == 1 + assert tag_warnings[0].level.is_warning diff --git a/tests/unit/pipelex/core/packages/test_transitive_resolver.py b/tests/unit/pipelex/core/packages/test_transitive_resolver.py new file mode 100644 index 000000000..ecc48fa4b --- /dev/null +++ b/tests/unit/pipelex/core/packages/test_transitive_resolver.py @@ -0,0 +1,610 @@ +from pathlib import Path + +import pytest +from pytest_mock import MockerFixture +from semantic_version import Version # type: ignore[import-untyped] + +from pipelex.core.packages.dependency_resolver import ( + ResolvedDependency, + resolve_all_dependencies, +) +from pipelex.core.packages.exceptions import TransitiveDependencyError +from pipelex.core.packages.manifest import MthdsPackageManifest, PackageDependency + + +def _make_manifest( + address: str, + version: str, + dependencies: list[PackageDependency] | None = None, +) -> MthdsPackageManifest: + """Helper to build a minimal manifest.""" + return MthdsPackageManifest( + address=address, + version=version, + description=f"Test package {address}", + dependencies=dependencies or [], + ) + + +def _make_resolved( + alias: str, + address: str, + manifest: MthdsPackageManifest | None, + tmp_path: Path, +) -> ResolvedDependency: + """Helper to build a ResolvedDependency for mocking.""" + pkg_dir = tmp_path / alias + pkg_dir.mkdir(exist_ok=True) + return ResolvedDependency( + alias=alias, + address=address, + manifest=manifest, + package_root=pkg_dir, + mthds_files=[], + exported_pipe_codes=None, + ) + + +class TestTransitiveResolver: + """Unit tests for transitive dependency resolution with mocked VCS.""" + + def test_linear_chain(self, mocker: MockerFixture, tmp_path: Path) -> None: + """A->B->C: both B and C appear in results.""" + # B depends on C + manifest_c = _make_manifest("github.com/org/pkg_c", "1.0.0") + manifest_b = _make_manifest( + "github.com/org/pkg_b", + "1.0.0", + dependencies=[ + PackageDependency(address="github.com/org/pkg_c", version="^1.0.0", alias="pkg_c"), + ], + ) + + resolved_b = _make_resolved("pkg_b", "github.com/org/pkg_b", manifest_b, tmp_path) + resolved_c = _make_resolved("pkg_c", "github.com/org/pkg_c", manifest_c, tmp_path) + + call_count = 0 + + def mock_resolve_remote(dep: PackageDependency, **_kwargs: object) -> ResolvedDependency: + nonlocal call_count + call_count += 1 + if dep.address == "github.com/org/pkg_b": + return resolved_b + if dep.address == "github.com/org/pkg_c": + return resolved_c + msg = f"Unexpected address: {dep.address}" + raise AssertionError(msg) + + mocker.patch( + "pipelex.core.packages.dependency_resolver.resolve_remote_dependency", + side_effect=mock_resolve_remote, + ) + + manifest_a = _make_manifest( + "github.com/org/pkg_a", + "1.0.0", + dependencies=[ + PackageDependency(address="github.com/org/pkg_b", version="^1.0.0", alias="pkg_b"), + ], + ) + + result = resolve_all_dependencies(manifest_a, tmp_path) + addresses = {dep.address for dep in result} + assert "github.com/org/pkg_b" in addresses + assert "github.com/org/pkg_c" in addresses + assert call_count == 2 + + def test_cycle_detection(self, mocker: MockerFixture, tmp_path: Path) -> None: + """A->B->A: raises TransitiveDependencyError 
with 'cycle'.""" + # B depends on A (cycle) + manifest_b = _make_manifest( + "github.com/org/pkg_b", + "1.0.0", + dependencies=[ + PackageDependency(address="github.com/org/pkg_a", version="^1.0.0", alias="pkg_a"), + ], + ) + + resolved_b = _make_resolved("pkg_b", "github.com/org/pkg_b", manifest_b, tmp_path) + + mocker.patch( + "pipelex.core.packages.dependency_resolver.resolve_remote_dependency", + return_value=resolved_b, + ) + + manifest_a = _make_manifest( + "github.com/org/pkg_a", + "1.0.0", + dependencies=[ + PackageDependency(address="github.com/org/pkg_b", version="^1.0.0", alias="pkg_b"), + ], + ) + + with pytest.raises(TransitiveDependencyError, match="cycle"): + resolve_all_dependencies(manifest_a, tmp_path) + + def test_diamond_resolved(self, mocker: MockerFixture, tmp_path: Path) -> None: + """A->B, A->C, both depend on D: D resolved once with compatible version.""" + manifest_d = _make_manifest("github.com/org/pkg_d", "1.2.0") + manifest_b = _make_manifest( + "github.com/org/pkg_b", + "1.0.0", + dependencies=[ + PackageDependency(address="github.com/org/pkg_d", version="^1.0.0", alias="pkg_d"), + ], + ) + manifest_c = _make_manifest( + "github.com/org/pkg_c", + "1.0.0", + dependencies=[ + PackageDependency(address="github.com/org/pkg_d", version="^1.1.0", alias="pkg_d"), + ], + ) + + resolved_b = _make_resolved("pkg_b", "github.com/org/pkg_b", manifest_b, tmp_path) + resolved_c = _make_resolved("pkg_c", "github.com/org/pkg_c", manifest_c, tmp_path) + resolved_d = _make_resolved("pkg_d", "github.com/org/pkg_d", manifest_d, tmp_path) + + def mock_resolve_remote(dep: PackageDependency, **_kwargs: object) -> ResolvedDependency: + if dep.address == "github.com/org/pkg_b": + return resolved_b + if dep.address == "github.com/org/pkg_c": + return resolved_c + if dep.address == "github.com/org/pkg_d": + return resolved_d + msg = f"Unexpected address: {dep.address}" + raise AssertionError(msg) + + mocker.patch( + "pipelex.core.packages.dependency_resolver.resolve_remote_dependency", + side_effect=mock_resolve_remote, + ) + + # Mock version_satisfies to return True for compatible constraints + mocker.patch( + "pipelex.core.packages.dependency_resolver.version_satisfies", + return_value=True, + ) + mocker.patch( + "pipelex.core.packages.dependency_resolver.parse_constraint", + return_value=mocker.MagicMock(), + ) + mocker.patch( + "pipelex.core.packages.dependency_resolver.parse_version", + return_value=Version("1.2.0"), + ) + + manifest_a = _make_manifest( + "github.com/org/pkg_a", + "1.0.0", + dependencies=[ + PackageDependency(address="github.com/org/pkg_b", version="^1.0.0", alias="pkg_b"), + PackageDependency(address="github.com/org/pkg_c", version="^1.0.0", alias="pkg_c"), + ], + ) + + result = resolve_all_dependencies(manifest_a, tmp_path) + addresses = [dep.address for dep in result] + # D should appear exactly once + assert addresses.count("github.com/org/pkg_d") == 1 + # B and C should both be present + assert "github.com/org/pkg_b" in addresses + assert "github.com/org/pkg_c" in addresses + + def test_diamond_unsatisfiable(self, mocker: MockerFixture, tmp_path: Path) -> None: + """B needs D ^1.0.0, C needs D ^2.0.0: raises TransitiveDependencyError.""" + manifest_d_v1 = _make_manifest("github.com/org/pkg_d", "1.0.0") + manifest_b = _make_manifest( + "github.com/org/pkg_b", + "1.0.0", + dependencies=[ + PackageDependency(address="github.com/org/pkg_d", version="^1.0.0", alias="pkg_d"), + ], + ) + manifest_c = _make_manifest( + "github.com/org/pkg_c", + "1.0.0", + 
dependencies=[ + PackageDependency(address="github.com/org/pkg_d", version="^2.0.0", alias="pkg_d"), + ], + ) + + resolved_b = _make_resolved("pkg_b", "github.com/org/pkg_b", manifest_b, tmp_path) + resolved_c = _make_resolved("pkg_c", "github.com/org/pkg_c", manifest_c, tmp_path) + resolved_d = _make_resolved("pkg_d", "github.com/org/pkg_d", manifest_d_v1, tmp_path) + + def mock_resolve_remote(dep: PackageDependency, **_kwargs: object) -> ResolvedDependency: + if dep.address == "github.com/org/pkg_b": + return resolved_b + if dep.address == "github.com/org/pkg_c": + return resolved_c + if dep.address == "github.com/org/pkg_d": + return resolved_d + msg = f"Unexpected address: {dep.address}" + raise AssertionError(msg) + + mocker.patch( + "pipelex.core.packages.dependency_resolver.resolve_remote_dependency", + side_effect=mock_resolve_remote, + ) + + # Mock version_satisfies to return False (existing v1 doesn't satisfy ^2.0.0) + mocker.patch( + "pipelex.core.packages.dependency_resolver.version_satisfies", + return_value=False, + ) + mocker.patch( + "pipelex.core.packages.dependency_resolver.parse_constraint", + return_value=mocker.MagicMock(), + ) + mocker.patch( + "pipelex.core.packages.dependency_resolver.parse_version", + return_value=Version("1.0.0"), + ) + + # Mock the tags listing for diamond resolution + mocker.patch( + "pipelex.core.packages.dependency_resolver.list_remote_version_tags", + return_value=[(Version("1.0.0"), "v1.0.0"), (Version("1.5.0"), "v1.5.0")], + ) + mocker.patch( + "pipelex.core.packages.dependency_resolver.select_minimum_version_for_multiple_constraints", + return_value=None, # no version satisfies both ^1.0.0 and ^2.0.0 + ) + + manifest_a = _make_manifest( + "github.com/org/pkg_a", + "1.0.0", + dependencies=[ + PackageDependency(address="github.com/org/pkg_b", version="^1.0.0", alias="pkg_b"), + PackageDependency(address="github.com/org/pkg_c", version="^1.0.0", alias="pkg_c"), + ], + ) + + with pytest.raises(TransitiveDependencyError, match="No version"): + resolve_all_dependencies(manifest_a, tmp_path) + + def test_local_deps_not_recursed(self, tmp_path: Path) -> None: + """Local path dep's sub-deps are NOT resolved transitively.""" + # Create a local dep directory with a manifest that has dependencies + local_dir = tmp_path / "local_pkg" + local_dir.mkdir() + methods_toml = """\ +[package] +address = "github.com/org/local_pkg" +version = "1.0.0" +description = "Local package" + +[dependencies] +sub_dep = { address = "github.com/org/sub_dep", version = "^1.0.0" } +""" + (local_dir / "METHODS.toml").write_text(methods_toml) + + manifest_a = _make_manifest( + "github.com/org/pkg_a", + "1.0.0", + dependencies=[ + PackageDependency( + address="github.com/org/local_pkg", + version="1.0.0", + alias="local_pkg", + path=str(local_dir), + ), + ], + ) + + # If sub_dep were resolved, it would fail because there's no mock. + # The fact it succeeds proves local deps are not recursed. 
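+        # Roughly the contract this test pins down (a hedged sketch, not the
+        # actual resolver implementation; load_local and recurse_into are
+        # hypothetical names):
+        #
+        #   for dep in manifest.dependencies:
+        #       if dep.path is not None:
+        #           resolved.append(load_local(dep))  # stop here, no recursion
+        #       else:
+        #           recurse_into(resolve_remote_dependency(dep))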
+ result = resolve_all_dependencies(manifest_a, tmp_path) + assert len(result) == 1 + assert result[0].alias == "local_pkg" + + def test_diamond_re_resolve_recurses_into_new_sub_deps(self, mocker: MockerFixture, tmp_path: Path) -> None: + """When diamond re-resolution picks a new version, its sub-deps are resolved.""" + # D v1.2.0 has sub-dep E (which D v1.0.0 did not have) + manifest_e = _make_manifest("github.com/org/pkg_e", "1.0.0") + manifest_d_v1 = _make_manifest("github.com/org/pkg_d", "1.0.0") + manifest_d_v1_2 = _make_manifest( + "github.com/org/pkg_d", + "1.2.0", + dependencies=[ + PackageDependency(address="github.com/org/pkg_e", version="^1.0.0", alias="pkg_e"), + ], + ) + manifest_b = _make_manifest( + "github.com/org/pkg_b", + "1.0.0", + dependencies=[ + PackageDependency(address="github.com/org/pkg_d", version="^1.0.0", alias="pkg_d"), + ], + ) + manifest_c = _make_manifest( + "github.com/org/pkg_c", + "1.0.0", + dependencies=[ + PackageDependency(address="github.com/org/pkg_d", version="^1.1.0", alias="pkg_d"), + ], + ) + + resolved_b = _make_resolved("pkg_b", "github.com/org/pkg_b", manifest_b, tmp_path) + resolved_c = _make_resolved("pkg_c", "github.com/org/pkg_c", manifest_c, tmp_path) + resolved_d_v1 = _make_resolved("pkg_d", "github.com/org/pkg_d", manifest_d_v1, tmp_path) + resolved_e = _make_resolved("pkg_e", "github.com/org/pkg_e", manifest_e, tmp_path) + + def mock_resolve_remote(dep: PackageDependency, **_kwargs: object) -> ResolvedDependency: + if dep.address == "github.com/org/pkg_b": + return resolved_b + if dep.address == "github.com/org/pkg_c": + return resolved_c + if dep.address == "github.com/org/pkg_d": + return resolved_d_v1 + if dep.address == "github.com/org/pkg_e": + return resolved_e + msg = f"Unexpected address: {dep.address}" + raise AssertionError(msg) + + mocker.patch( + "pipelex.core.packages.dependency_resolver.resolve_remote_dependency", + side_effect=mock_resolve_remote, + ) + + # First encounter of D: v1.0.0 satisfies ^1.0.0 but NOT ^1.1.0 + mocker.patch( + "pipelex.core.packages.dependency_resolver.version_satisfies", + return_value=False, + ) + mocker.patch( + "pipelex.core.packages.dependency_resolver.parse_constraint", + return_value=mocker.MagicMock(), + ) + mocker.patch( + "pipelex.core.packages.dependency_resolver.parse_version", + return_value=Version("1.0.0"), + ) + + # Diamond re-resolution picks D v1.2.0 + mocker.patch( + "pipelex.core.packages.dependency_resolver.list_remote_version_tags", + return_value=[(Version("1.0.0"), "v1.0.0"), (Version("1.2.0"), "v1.2.0")], + ) + mocker.patch( + "pipelex.core.packages.dependency_resolver.select_minimum_version_for_multiple_constraints", + return_value=Version("1.2.0"), + ) + mocker.patch( + "pipelex.core.packages.dependency_resolver.is_cached", + return_value=True, + ) + mocker.patch( + "pipelex.core.packages.dependency_resolver.get_cached_package_path", + return_value=tmp_path / "pkg_d", + ) + mocker.patch( + "pipelex.core.packages.dependency_resolver._find_manifest_in_dir", + return_value=manifest_d_v1_2, + ) + mocker.patch( + "pipelex.core.packages.dependency_resolver.collect_mthds_files", + return_value=[], + ) + + manifest_a = _make_manifest( + "github.com/org/pkg_a", + "1.0.0", + dependencies=[ + PackageDependency(address="github.com/org/pkg_b", version="^1.0.0", alias="pkg_b"), + PackageDependency(address="github.com/org/pkg_c", version="^1.0.0", alias="pkg_c"), + ], + ) + + result = resolve_all_dependencies(manifest_a, tmp_path) + addresses = {dep.address for dep in result} + # E 
should be resolved as a sub-dep of the re-resolved D v1.2.0 + assert "github.com/org/pkg_e" in addresses + + def test_stale_subdep_constraints_cleaned_on_diamond_reresolution(self, mocker: MockerFixture, tmp_path: Path) -> None: + """Stale constraints from an old version's sub-deps are removed during diamond re-resolution. + + Scenario: A→B→D@^1.0, A→C→D@^1.1. D@1.0.0 depends on E@^1.0. + Diamond re-resolves D to 1.1.0. D@1.1.0 depends on E@^2.0. + The stale E@^1.0 constraint from D@1.0.0 must be removed so E resolves + cleanly with just ^2.0 instead of failing on the incompatible [^1.0, ^2.0]. + """ + # E v2.0.0 (the version D@1.1.0 needs) + manifest_e = _make_manifest("github.com/org/pkg_e", "2.0.0") + + # D v1.0.0 (old) depends on E@^1.0 + manifest_d_v1 = _make_manifest( + "github.com/org/pkg_d", + "1.0.0", + dependencies=[ + PackageDependency(address="github.com/org/pkg_e", version="^1.0.0", alias="pkg_e"), + ], + ) + # D v1.1.0 (new, after diamond re-resolution) depends on E@^2.0 + manifest_d_v1_1 = _make_manifest( + "github.com/org/pkg_d", + "1.1.0", + dependencies=[ + PackageDependency(address="github.com/org/pkg_e", version="^2.0.0", alias="pkg_e"), + ], + ) + + # B depends on D@^1.0 + manifest_b = _make_manifest( + "github.com/org/pkg_b", + "1.0.0", + dependencies=[ + PackageDependency(address="github.com/org/pkg_d", version="^1.0.0", alias="pkg_d"), + ], + ) + # C depends on D@^1.1 + manifest_c = _make_manifest( + "github.com/org/pkg_c", + "1.0.0", + dependencies=[ + PackageDependency(address="github.com/org/pkg_d", version="^1.1.0", alias="pkg_d"), + ], + ) + + resolved_b = _make_resolved("pkg_b", "github.com/org/pkg_b", manifest_b, tmp_path) + resolved_c = _make_resolved("pkg_c", "github.com/org/pkg_c", manifest_c, tmp_path) + resolved_d_v1 = _make_resolved("pkg_d", "github.com/org/pkg_d", manifest_d_v1, tmp_path) + resolved_e = _make_resolved("pkg_e", "github.com/org/pkg_e", manifest_e, tmp_path) + + # Track which addresses were resolved via resolve_remote_dependency + remote_resolve_calls: list[str] = [] + + def mock_resolve_remote(dep: PackageDependency, **_kwargs: object) -> ResolvedDependency: + remote_resolve_calls.append(dep.address) + if dep.address == "github.com/org/pkg_b": + return resolved_b + if dep.address == "github.com/org/pkg_c": + return resolved_c + if dep.address == "github.com/org/pkg_d": + return resolved_d_v1 # First resolution gets v1.0.0 + if dep.address == "github.com/org/pkg_e": + return resolved_e + msg = f"Unexpected address: {dep.address}" + raise AssertionError(msg) + + mocker.patch( + "pipelex.core.packages.dependency_resolver.resolve_remote_dependency", + side_effect=mock_resolve_remote, + ) + + # version_satisfies: D@1.0.0 does NOT satisfy ^1.1.0 + def mock_version_satisfies(version: Version, _constraint: object) -> bool: + return bool(version != Version("1.0.0")) + + mocker.patch( + "pipelex.core.packages.dependency_resolver.version_satisfies", + side_effect=mock_version_satisfies, + ) + mocker.patch( + "pipelex.core.packages.dependency_resolver.parse_constraint", + return_value=mocker.MagicMock(), + ) + mocker.patch( + "pipelex.core.packages.dependency_resolver.parse_version", + return_value=Version("1.0.0"), + ) + + # Diamond re-resolution for D picks v1.1.0 + mocker.patch( + "pipelex.core.packages.dependency_resolver.list_remote_version_tags", + return_value=[(Version("1.0.0"), "v1.0.0"), (Version("1.1.0"), "v1.1.0")], + ) + mocker.patch( + "pipelex.core.packages.dependency_resolver.select_minimum_version_for_multiple_constraints", + 
return_value=Version("1.1.0"), + ) + mocker.patch( + "pipelex.core.packages.dependency_resolver.is_cached", + return_value=True, + ) + mocker.patch( + "pipelex.core.packages.dependency_resolver.get_cached_package_path", + return_value=tmp_path / "pkg_d", + ) + mocker.patch( + "pipelex.core.packages.dependency_resolver._find_manifest_in_dir", + return_value=manifest_d_v1_1, + ) + mocker.patch( + "pipelex.core.packages.dependency_resolver.collect_mthds_files", + return_value=[], + ) + + manifest_a = _make_manifest( + "github.com/org/pkg_a", + "1.0.0", + dependencies=[ + PackageDependency(address="github.com/org/pkg_b", version="^1.0.0", alias="pkg_b"), + PackageDependency(address="github.com/org/pkg_c", version="^1.0.0", alias="pkg_c"), + ], + ) + + # Without the fix, this would raise TransitiveDependencyError because + # E would have stale constraint ^1.0.0 from D@1.0.0 plus ^2.0.0 from D@1.1.0 + result = resolve_all_dependencies(manifest_a, tmp_path) + addresses = {dep.address for dep in result} + + # E should be resolved (D@1.1.0's sub-dep) + assert "github.com/org/pkg_e" in addresses + # All deps should be present + assert "github.com/org/pkg_b" in addresses + assert "github.com/org/pkg_c" in addresses + assert "github.com/org/pkg_d" in addresses + + def test_dedup_same_address(self, mocker: MockerFixture, tmp_path: Path) -> None: + """Multiple paths to same address: resolved only once.""" + manifest_d = _make_manifest("github.com/org/pkg_d", "1.0.0") + + # Both B and C depend on D with the same constraint + manifest_b = _make_manifest( + "github.com/org/pkg_b", + "1.0.0", + dependencies=[ + PackageDependency(address="github.com/org/pkg_d", version="^1.0.0", alias="pkg_d"), + ], + ) + manifest_c = _make_manifest( + "github.com/org/pkg_c", + "1.0.0", + dependencies=[ + PackageDependency(address="github.com/org/pkg_d", version="^1.0.0", alias="pkg_d"), + ], + ) + + resolved_b = _make_resolved("pkg_b", "github.com/org/pkg_b", manifest_b, tmp_path) + resolved_c = _make_resolved("pkg_c", "github.com/org/pkg_c", manifest_c, tmp_path) + resolved_d = _make_resolved("pkg_d", "github.com/org/pkg_d", manifest_d, tmp_path) + + resolve_count: dict[str, int] = {} + + def mock_resolve_remote(dep: PackageDependency, **_kwargs: object) -> ResolvedDependency: + resolve_count[dep.address] = resolve_count.get(dep.address, 0) + 1 + if dep.address == "github.com/org/pkg_b": + return resolved_b + if dep.address == "github.com/org/pkg_c": + return resolved_c + if dep.address == "github.com/org/pkg_d": + return resolved_d + msg = f"Unexpected address: {dep.address}" + raise AssertionError(msg) + + mocker.patch( + "pipelex.core.packages.dependency_resolver.resolve_remote_dependency", + side_effect=mock_resolve_remote, + ) + + # Mock version_satisfies for the dedup check + mocker.patch( + "pipelex.core.packages.dependency_resolver.version_satisfies", + return_value=True, + ) + mocker.patch( + "pipelex.core.packages.dependency_resolver.parse_constraint", + return_value=mocker.MagicMock(), + ) + mocker.patch( + "pipelex.core.packages.dependency_resolver.parse_version", + return_value=Version("1.0.0"), + ) + + manifest_a = _make_manifest( + "github.com/org/pkg_a", + "1.0.0", + dependencies=[ + PackageDependency(address="github.com/org/pkg_b", version="^1.0.0", alias="pkg_b"), + PackageDependency(address="github.com/org/pkg_c", version="^1.0.0", alias="pkg_c"), + ], + ) + + result = resolve_all_dependencies(manifest_a, tmp_path) + addresses = [dep.address for dep in result] + # D appears once (deduped) + assert 
addresses.count("github.com/org/pkg_d") == 1 + # D was resolved only once via resolve_remote_dependency + assert resolve_count.get("github.com/org/pkg_d", 0) == 1 diff --git a/tests/unit/pipelex/core/packages/test_vcs_resolver.py b/tests/unit/pipelex/core/packages/test_vcs_resolver.py new file mode 100644 index 000000000..2a3b8b2cc --- /dev/null +++ b/tests/unit/pipelex/core/packages/test_vcs_resolver.py @@ -0,0 +1,51 @@ +import pytest +from semantic_version import Version # type: ignore[import-untyped] + +from pipelex.core.packages.exceptions import VersionResolutionError +from pipelex.core.packages.vcs_resolver import address_to_clone_url, resolve_version_from_tags + + +class TestVCSResolver: + """Unit tests for pure VCS resolver functions.""" + + def test_address_to_clone_url_github(self): + """Standard GitHub address maps to HTTPS clone URL.""" + result = address_to_clone_url("github.com/org/repo") + assert result == "https://github.com/org/repo.git" + + def test_address_to_clone_url_generic_host(self): + """Non-GitHub host address maps correctly.""" + result = address_to_clone_url("gitlab.example.io/team/project") + assert result == "https://gitlab.example.io/team/project.git" + + def test_address_to_clone_url_already_dot_git(self): + """Address already ending with .git does not get doubled.""" + result = address_to_clone_url("github.com/org/repo.git") + assert result == "https://github.com/org/repo.git" + assert not result.endswith(".git.git") + + def test_resolve_version_from_tags_selects_minimum(self): + """MVS picks the lowest matching version.""" + tags: list[tuple[Version, str]] = [ + (Version("1.0.0"), "v1.0.0"), + (Version("1.1.0"), "v1.1.0"), + (Version("1.2.0"), "v1.2.0"), + (Version("2.0.0"), "v2.0.0"), + ] + selected_version, selected_tag = resolve_version_from_tags(tags, "^1.0.0") + assert selected_version == Version("1.0.0") + assert selected_tag == "v1.0.0" + + def test_resolve_version_from_tags_no_match_raises(self): + """No matching version raises VersionResolutionError.""" + tags: list[tuple[Version, str]] = [ + (Version("1.0.0"), "v1.0.0"), + (Version("1.1.0"), "v1.1.0"), + ] + with pytest.raises(VersionResolutionError, match="No version satisfying"): + resolve_version_from_tags(tags, "^2.0.0") + + def test_resolve_version_from_tags_empty_raises(self): + """Empty tag list raises VersionResolutionError.""" + with pytest.raises(VersionResolutionError, match="No version tags available"): + resolve_version_from_tags([], "^1.0.0") diff --git a/tests/unit/pipelex/core/packages/test_visibility.py b/tests/unit/pipelex/core/packages/test_visibility.py new file mode 100644 index 000000000..f08d6c9a0 --- /dev/null +++ b/tests/unit/pipelex/core/packages/test_visibility.py @@ -0,0 +1,187 @@ +import pytest + +from pipelex.core.bundles.pipelex_bundle_blueprint import PipelexBundleBlueprint +from pipelex.core.packages.manifest import DomainExports, MthdsPackageManifest +from pipelex.core.packages.visibility import PackageVisibilityChecker +from pipelex.core.qualified_ref import QualifiedRef +from pipelex.pipe_controllers.sequence.pipe_sequence_blueprint import PipeSequenceBlueprint +from pipelex.pipe_controllers.sub_pipe_blueprint import SubPipeBlueprint +from pipelex.pipe_operators.llm.pipe_llm_blueprint import PipeLLMBlueprint + + +def _make_llm_pipe(description: str = "test", output: str = "Text", prompt: str = "test") -> PipeLLMBlueprint: + return PipeLLMBlueprint( + type="PipeLLM", + description=description, + output=output, + prompt=prompt, + ) + + +def 
_make_manifest_with_exports(exports: list[DomainExports]) -> MthdsPackageManifest: + return MthdsPackageManifest( + address="github.com/org/test", + version="1.0.0", + description="Test package", + exports=exports, + ) + + +class TestPackageVisibilityChecker: + """Tests for cross-domain pipe visibility enforcement.""" + + def test_no_manifest_no_violations(self): + """No manifest -> all pipes public, no violations.""" + bundle = PipelexBundleBlueprint( + domain="alpha", + pipe={"my_pipe": _make_llm_pipe()}, + ) + checker = PackageVisibilityChecker(manifest=None, bundles=[bundle]) + errors = checker.validate_all_pipe_references() + assert errors == [] + + def test_cross_domain_ref_to_exported_pipe_passes(self): + """Cross-domain ref to an exported pipe should pass.""" + manifest = _make_manifest_with_exports( + [ + DomainExports(domain_path="beta", pipes=["do_beta"]), + ] + ) + ref = QualifiedRef.parse_pipe_ref("beta.do_beta") + checker = PackageVisibilityChecker(manifest=manifest, bundles=[]) + assert checker.is_pipe_accessible_from(ref, "alpha") is True + + def test_cross_domain_ref_to_main_pipe_passes(self): + """Cross-domain ref to a main_pipe (not in exports) should pass (auto-export).""" + manifest = _make_manifest_with_exports([]) # No explicit exports + bundle_beta = PipelexBundleBlueprint( + domain="beta", + main_pipe="beta_main", + pipe={"beta_main": _make_llm_pipe()}, + ) + ref = QualifiedRef.parse_pipe_ref("beta.beta_main") + checker = PackageVisibilityChecker(manifest=manifest, bundles=[bundle_beta]) + assert checker.is_pipe_accessible_from(ref, "alpha") is True + + def test_cross_domain_ref_to_non_exported_pipe_fails(self): + """Cross-domain ref to a non-exported pipe should produce a VisibilityError.""" + manifest = _make_manifest_with_exports( + [ + DomainExports(domain_path="beta", pipes=["public_pipe"]), + ] + ) + bundle_beta = PipelexBundleBlueprint( + domain="beta", + pipe={ + "public_pipe": _make_llm_pipe(), + "private_pipe": _make_llm_pipe(), + }, + ) + ref = QualifiedRef.parse_pipe_ref("beta.private_pipe") + checker = PackageVisibilityChecker(manifest=manifest, bundles=[bundle_beta]) + assert checker.is_pipe_accessible_from(ref, "alpha") is False + + def test_same_domain_ref_to_non_exported_pipe_passes(self): + """Same-domain ref to a non-exported pipe should always pass.""" + manifest = _make_manifest_with_exports( + [ + DomainExports(domain_path="alpha", pipes=["exported_only"]), + ] + ) + ref = QualifiedRef.parse_pipe_ref("alpha.internal_pipe") + checker = PackageVisibilityChecker(manifest=manifest, bundles=[]) + assert checker.is_pipe_accessible_from(ref, "alpha") is True + + def test_bare_ref_passes(self): + """Bare ref (no domain qualifier) should always pass.""" + manifest = _make_manifest_with_exports([]) + ref = QualifiedRef(domain_path=None, local_code="some_pipe") + checker = PackageVisibilityChecker(manifest=manifest, bundles=[]) + assert checker.is_pipe_accessible_from(ref, "alpha") is True + + def test_validate_all_detects_violations(self): + """validate_all_pipe_references finds cross-domain violations in bundles.""" + manifest = _make_manifest_with_exports( + [ + DomainExports(domain_path="pkg_test_scoring", pipes=["pkg_test_compute_weighted_score"]), + ] + ) + # Bundle in legal.contracts that references a non-exported scoring pipe + bundle_legal = PipelexBundleBlueprint( + domain="pkg_test_legal.contracts", + pipe={ + "pkg_test_orchestrate": PipeSequenceBlueprint( + type="PipeSequence", + description="Orchestrate", + output="Text", + steps=[ + 
SubPipeBlueprint(pipe="pkg_test_scoring.pkg_test_private_helper"), + ], + ), + }, + ) + bundle_scoring = PipelexBundleBlueprint( + domain="pkg_test_scoring", + main_pipe="pkg_test_compute_weighted_score", + pipe={ + "pkg_test_compute_weighted_score": _make_llm_pipe(), + "pkg_test_private_helper": _make_llm_pipe(), + }, + ) + checker = PackageVisibilityChecker(manifest=manifest, bundles=[bundle_legal, bundle_scoring]) + errors = checker.validate_all_pipe_references() + assert len(errors) == 1 + assert errors[0].pipe_ref == "pkg_test_scoring.pkg_test_private_helper" + assert "[exports" in errors[0].message + + def test_validate_all_no_violations_when_all_exported(self): + """validate_all_pipe_references returns empty when all refs are exported.""" + manifest = _make_manifest_with_exports( + [ + DomainExports(domain_path="pkg_test_scoring", pipes=["pkg_test_compute_weighted_score"]), + ] + ) + bundle_legal = PipelexBundleBlueprint( + domain="pkg_test_legal.contracts", + pipe={ + "pkg_test_orchestrate": PipeSequenceBlueprint( + type="PipeSequence", + description="Orchestrate", + output="Text", + steps=[ + SubPipeBlueprint(pipe="pkg_test_scoring.pkg_test_compute_weighted_score"), + ], + ), + }, + ) + checker = PackageVisibilityChecker(manifest=manifest, bundles=[bundle_legal]) + errors = checker.validate_all_pipe_references() + assert errors == [] + + @pytest.mark.parametrize( + "reserved_domain", + ["native", "mthds", "pipelex"], + ) + def test_bundle_with_reserved_domain_produces_error(self, reserved_domain: str): + """Bundle declaring a reserved domain should produce a VisibilityError.""" + manifest = _make_manifest_with_exports([]) + bundle = PipelexBundleBlueprint( + domain=reserved_domain, + pipe={"some_pipe": _make_llm_pipe()}, + ) + checker = PackageVisibilityChecker(manifest=manifest, bundles=[bundle]) + errors = checker.validate_reserved_domains() + assert len(errors) == 1 + assert "reserved domain" in errors[0].message + assert reserved_domain in errors[0].message + + def test_bundle_with_non_reserved_domain_no_error(self): + """Bundle declaring a non-reserved domain should produce no errors.""" + manifest = _make_manifest_with_exports([]) + bundle = PipelexBundleBlueprint( + domain="legal", + pipe={"some_pipe": _make_llm_pipe()}, + ) + checker = PackageVisibilityChecker(manifest=manifest, bundles=[bundle]) + errors = checker.validate_reserved_domains() + assert errors == [] diff --git a/tests/unit/pipelex/core/pipes/test_parse_concept_with_multiplicity.py b/tests/unit/pipelex/core/pipes/test_parse_concept_with_multiplicity.py index e454dcd5b..21878ea3d 100644 --- a/tests/unit/pipelex/core/pipes/test_parse_concept_with_multiplicity.py +++ b/tests/unit/pipelex/core/pipes/test_parse_concept_with_multiplicity.py @@ -90,3 +90,29 @@ def test_invalid_negative_multiplicity(self): with pytest.raises(ValueError, match="Invalid concept specification syntax"): parse_concept_with_multiplicity("domain.Concept[-5]") + + # ========== Hierarchical domain tests ========== + + def test_valid_hierarchical_domain_concept(self): + """Test parsing concept with hierarchical domain (multiple dot segments).""" + result = parse_concept_with_multiplicity("legal.contracts.NonCompeteClause") + assert result.concept_ref_or_code == "legal.contracts.NonCompeteClause" + assert result.multiplicity is None + + def test_valid_hierarchical_domain_concept_with_variable_list(self): + """Test parsing hierarchical domain concept with empty brackets [].""" + result = 
parse_concept_with_multiplicity("legal.contracts.NonCompeteClause[]") + assert result.concept_ref_or_code == "legal.contracts.NonCompeteClause" + assert result.multiplicity is True + + def test_valid_hierarchical_domain_concept_with_fixed_count(self): + """Test parsing hierarchical domain concept with fixed count [N].""" + result = parse_concept_with_multiplicity("legal.contracts.NonCompeteClause[5]") + assert result.concept_ref_or_code == "legal.contracts.NonCompeteClause" + assert result.multiplicity == 5 + + def test_valid_deep_hierarchical_domain(self): + """Test parsing concept with deeply nested domain.""" + result = parse_concept_with_multiplicity("a.b.c.d.Entity[]") + assert result.concept_ref_or_code == "a.b.c.d.Entity" + assert result.multiplicity is True diff --git a/tests/unit/pipelex/core/stuffs/data.py b/tests/unit/pipelex/core/stuffs/data.py index 5360c4678..2a6cd07fa 100644 --- a/tests/unit/pipelex/core/stuffs/data.py +++ b/tests/unit/pipelex/core/stuffs/data.py @@ -1,9 +1,9 @@ from typing import ClassVar from markupsafe import escape +from mthds.models.pipeline_inputs import StuffContentOrData from pydantic import Field -from pipelex.client.protocol import StuffContentOrData from pipelex.core.concepts.concept_factory import ConceptFactory from pipelex.core.concepts.native.concept_native import NativeConceptCode from pipelex.core.domains.domain import SpecialDomain diff --git a/tests/unit/pipelex/core/stuffs/test_stuff_factory_combine_stuffs.py b/tests/unit/pipelex/core/stuffs/test_stuff_factory_combine_stuffs.py new file mode 100644 index 000000000..e79d1f169 --- /dev/null +++ b/tests/unit/pipelex/core/stuffs/test_stuff_factory_combine_stuffs.py @@ -0,0 +1,164 @@ +import os +from pathlib import Path +from typing import TYPE_CHECKING, Callable + +import pytest +from pydantic import Field + +from pipelex.core.concepts.concept_factory import ConceptFactory +from pipelex.core.stuffs.exceptions import StuffFactoryError +from pipelex.core.stuffs.structured_content import StructuredContent +from pipelex.core.stuffs.stuff_factory import StuffFactory +from pipelex.core.stuffs.text_content import TextContent +from pipelex.hub import get_concept_library +from pipelex.system.registries.class_registry_utils import ClassRegistryUtils + +if TYPE_CHECKING: + from pipelex.core.stuffs.stuff_content import StuffContent + + +class SentimentAndWordCount(StructuredContent): + """A structured content combining sentiment and word count results.""" + + sentiment_result: TextContent = Field(description="Sentiment analysis result") + word_count_result: TextContent = Field(description="Word count result") + + +class SingleFieldContent(StructuredContent): + """A structured content with a single field.""" + + summary: TextContent = Field(description="Summary text") + + +DOMAIN_CODE = "test_combine" + + +@pytest.fixture(scope="class") +def setup_combine_concepts(load_test_library: Callable[[list[Path]], None]): + """Register structured content classes and create concepts for combine_stuffs tests.""" + load_test_library([Path(__file__).parent]) + ClassRegistryUtils.register_classes_in_file( + file_path=os.path.join(os.path.dirname(__file__), "test_stuff_factory_combine_stuffs.py"), + base_class=StructuredContent, + is_include_imported=False, + ) + + concept_library = get_concept_library() + + concept_sentiment_and_word_count = ConceptFactory.make( + concept_code="SentimentAndWordCount", + domain_code=DOMAIN_CODE, + description="Combined sentiment and word count", + 
structure_class_name="SentimentAndWordCount", + ) + concept_library.add_new_concept(concept=concept_sentiment_and_word_count) + + concept_single_field = ConceptFactory.make( + concept_code="SingleFieldContent", + domain_code=DOMAIN_CODE, + description="Single field content", + structure_class_name="SingleFieldContent", + ) + concept_library.add_new_concept(concept=concept_single_field) + + yield + + concept_library.remove_concepts_by_concept_refs( + concept_refs=[ + f"{DOMAIN_CODE}.SentimentAndWordCount", + f"{DOMAIN_CODE}.SingleFieldContent", + ] + ) + + +@pytest.mark.usefixtures("setup_combine_concepts") +class TestStuffFactoryCombineStuffs: + """Tests for StuffFactory.combine_stuffs method.""" + + def test_combine_two_text_contents(self): + """Test combining two TextContent fields into a StructuredContent stuff.""" + concept = get_concept_library().get_required_concept(concept_ref=f"{DOMAIN_CODE}.SentimentAndWordCount") + + stuff_contents: dict[str, StuffContent] = { + "sentiment_result": TextContent(text="positive"), + "word_count_result": TextContent(text="42"), + } + + result = StuffFactory.combine_stuffs( + concept=concept, + stuff_contents=stuff_contents, + name="combined_analysis", + ) + + assert result.stuff_name == "combined_analysis" + assert isinstance(result.content, SentimentAndWordCount) + assert result.content.sentiment_result.text == "positive" + assert result.content.word_count_result.text == "42" + assert result.concept.code == "SentimentAndWordCount" + assert result.concept.domain_code == DOMAIN_CODE + + def test_combine_single_field(self): + """Test combining a single TextContent field.""" + concept = get_concept_library().get_required_concept(concept_ref=f"{DOMAIN_CODE}.SingleFieldContent") + + stuff_contents: dict[str, StuffContent] = { + "summary": TextContent(text="This is a summary"), + } + + result = StuffFactory.combine_stuffs( + concept=concept, + stuff_contents=stuff_contents, + name="single_field_stuff", + ) + + assert isinstance(result.content, SingleFieldContent) + assert result.content.summary.text == "This is a summary" + + def test_combine_without_name_auto_generates(self): + """Test that omitting the name parameter still produces a valid Stuff.""" + concept = get_concept_library().get_required_concept(concept_ref=f"{DOMAIN_CODE}.SingleFieldContent") + + stuff_contents: dict[str, StuffContent] = { + "summary": TextContent(text="auto-named"), + } + + result = StuffFactory.combine_stuffs( + concept=concept, + stuff_contents=stuff_contents, + ) + + assert result.stuff_name is not None + assert len(result.stuff_name) > 0 + assert isinstance(result.content, SingleFieldContent) + + def test_combine_with_missing_field_raises_error(self): + """Test that missing a required field raises StuffFactoryError.""" + concept = get_concept_library().get_required_concept(concept_ref=f"{DOMAIN_CODE}.SentimentAndWordCount") + + stuff_contents: dict[str, StuffContent] = { + "sentiment_result": TextContent(text="positive"), + # missing word_count_result + } + + with pytest.raises(StuffFactoryError, match="Error combining stuffs"): + StuffFactory.combine_stuffs( + concept=concept, + stuff_contents=stuff_contents, + name="incomplete", + ) + + def test_combine_with_wrong_content_type_raises_error(self): + """Test that passing wrong content type for a field raises StuffFactoryError.""" + concept = get_concept_library().get_required_concept(concept_ref=f"{DOMAIN_CODE}.SentimentAndWordCount") + + stuff_contents: dict[str, StuffContent] = { + "sentiment_result": 
TextContent(text="positive"), + "word_count_result": "not_a_stuff_content", # type: ignore[dict-item] + } + + with pytest.raises(StuffFactoryError, match="Error combining stuffs"): + StuffFactory.combine_stuffs( + concept=concept, + stuff_contents=stuff_contents, + name="wrong_type", + ) diff --git a/tests/unit/pipelex/core/stuffs/test_stuff_factory_implicit_memory.py b/tests/unit/pipelex/core/stuffs/test_stuff_factory_implicit_memory.py index 673c38d85..2c3ff718f 100644 --- a/tests/unit/pipelex/core/stuffs/test_stuff_factory_implicit_memory.py +++ b/tests/unit/pipelex/core/stuffs/test_stuff_factory_implicit_memory.py @@ -3,9 +3,9 @@ from typing import Any, Callable import pytest +from mthds.models.pipeline_inputs import StuffContentOrData from pipelex import log, pretty_print -from pipelex.client.protocol import StuffContentOrData from pipelex.core.concepts.concept_factory import ConceptFactory from pipelex.core.stuffs.structured_content import StructuredContent from pipelex.core.stuffs.stuff import Stuff diff --git a/tests/unit/pipelex/core/test_data/domain/simple_domains.py b/tests/unit/pipelex/core/test_data/domain/simple_domains.py index 4a7bd5c0a..7d28758e4 100644 --- a/tests/unit/pipelex/core/test_data/domain/simple_domains.py +++ b/tests/unit/pipelex/core/test_data/domain/simple_domains.py @@ -24,8 +24,32 @@ ), ) +HIERARCHICAL_DOMAIN = ( + "hierarchical_domain", + """domain = "legal.contracts" +description = "A hierarchical domain for legal contracts" +""", + PipelexBundleBlueprint( + domain="legal.contracts", + description="A hierarchical domain for legal contracts", + ), +) + +DEEP_HIERARCHICAL_DOMAIN = ( + "deep_hierarchical_domain", + """domain = "legal.contracts.shareholder" +description = "A deeply nested hierarchical domain" +""", + PipelexBundleBlueprint( + domain="legal.contracts.shareholder", + description="A deeply nested hierarchical domain", + ), +) + # Export all domain test cases DOMAIN_TEST_CASES = [ SIMPLE_DOMAIN, DOMAIN_WITH_SYSTEM_PROMPTS, + HIERARCHICAL_DOMAIN, + DEEP_HIERARCHICAL_DOMAIN, ] diff --git a/tests/unit/pipelex/core/test_data/errors/invalid_plx.py b/tests/unit/pipelex/core/test_data/errors/invalid_mthds.py similarity index 82% rename from tests/unit/pipelex/core/test_data/errors/invalid_plx.py rename to tests/unit/pipelex/core/test_data/errors/invalid_mthds.py index a1571ec44..841962f75 100644 --- a/tests/unit/pipelex/core/test_data/errors/invalid_plx.py +++ b/tests/unit/pipelex/core/test_data/errors/invalid_mthds.py @@ -1,13 +1,13 @@ -from pipelex.core.interpreter.interpreter import PipelexInterpreterError, PLXDecodeError +from pipelex.core.interpreter.interpreter import MthdsDecodeError, PipelexInterpreterError -INVALID_PLX_SYNTAX = ( - "invalid_plx_syntax", +INVALID_MTHDS_SYNTAX = ( + "invalid_mthds_syntax", """domain = "test_domain" -description = "Domain with invalid PLX syntax" +description = "Domain with invalid MTHDS syntax" [concept] InvalidConcept = "This is missing a closing quote""", - PLXDecodeError, + MthdsDecodeError, ) MALFORMED_SECTION = ( @@ -18,7 +18,7 @@ [concept TestConcept = "Missing closing bracket" """, - PLXDecodeError, + MthdsDecodeError, ) UNCLOSED_STRING = ( @@ -26,7 +26,7 @@ """domain = "test_domain" description = "Domain with unclosed string """, - PLXDecodeError, + MthdsDecodeError, ) DUPLICATE_KEYS = ( @@ -38,7 +38,7 @@ [concept] TestConcept = "A test concept" """, - PLXDecodeError, + MthdsDecodeError, ) INVALID_ESCAPE_SEQUENCE = ( @@ -49,7 +49,7 @@ [concept] TestConcept = "A test concept" """, - PLXDecodeError, + 
MthdsDecodeError, ) # PipelexBundleBlueprint Structure Errors @@ -120,7 +120,7 @@ [concept] TestConcept = "A test concept" """, - TypeError, + PipelexInterpreterError, ) WRONG_TYPE_FOR_DEFINITION = ( @@ -174,7 +174,7 @@ [concept.] InvalidName = "Empty table name" """, - PLXDecodeError, + MthdsDecodeError, ) INVALID_ARRAY_SYNTAX = ( @@ -185,7 +185,7 @@ [concept] TestConcept = ["Unclosed array" """, - PLXDecodeError, + MthdsDecodeError, ) INVALID_ARRAY_SYNTAX2 = ( "invalid_array_syntax", @@ -195,13 +195,35 @@ [concept] [concept] """, - PLXDecodeError, + MthdsDecodeError, +) + +DOUBLE_DOT_DOMAIN = ( + "double_dot_domain", + """domain = "legal..contracts" +description = "Domain with double dots" + +[concept] +TestConcept = "A test concept" +""", + PipelexInterpreterError, +) + +LEADING_DOT_DOMAIN = ( + "leading_dot_domain", + """domain = ".legal" +description = "Domain with leading dot" + +[concept] +TestConcept = "A test concept" +""", + PipelexInterpreterError, ) # Export all error test cases ERROR_TEST_CASES: list[tuple[str, str, type[Exception] | tuple[type[Exception], ...]]] = [ - # PLX Syntax Errors - INVALID_PLX_SYNTAX, + # MTHDS Syntax Errors + INVALID_MTHDS_SYNTAX, MALFORMED_SECTION, UNCLOSED_STRING, DUPLICATE_KEYS, @@ -220,4 +242,7 @@ WRONG_TYPE_FOR_CONCEPT_SECTION, WRONG_TYPE_FOR_PIPE_SECTION, INVALID_NESTED_SECTION, + # Hierarchical Domain Errors + DOUBLE_DOT_DOMAIN, + LEADING_DOT_DOMAIN, ] diff --git a/tests/unit/pipelex/core/test_data/interpreter_test_cases.py b/tests/unit/pipelex/core/test_data/interpreter_test_cases.py index 663094fea..f69f5c37c 100644 --- a/tests/unit/pipelex/core/test_data/interpreter_test_cases.py +++ b/tests/unit/pipelex/core/test_data/interpreter_test_cases.py @@ -6,7 +6,7 @@ from tests.unit.pipelex.core.test_data.concepts.simple_concepts import SIMPLE_CONCEPT_TEST_CASES from tests.unit.pipelex.core.test_data.concepts.structured_concepts import STRUCTURED_CONCEPT_TEST_CASES from tests.unit.pipelex.core.test_data.domain.simple_domains import DOMAIN_TEST_CASES -from tests.unit.pipelex.core.test_data.errors.invalid_plx import ERROR_TEST_CASES +from tests.unit.pipelex.core.test_data.errors.invalid_mthds import ERROR_TEST_CASES from tests.unit.pipelex.core.test_data.pipes.controllers.batch.pipe_batch import PIPE_BATCH_TEST_CASES from tests.unit.pipelex.core.test_data.pipes.controllers.condition.pipe_condition import PIPE_CONDITION_TEST_CASES from tests.unit.pipelex.core.test_data.pipes.controllers.parallel.pipe_parallel import PIPE_PARALLEL_TEST_CASES @@ -19,10 +19,10 @@ class InterpreterTestCases: - """Test cases for PipelexInterpreter with PLX content and expected blueprints.""" + """Test cases for PipelexInterpreter with MTHDS content and expected blueprints.""" # Aggregate all valid test cases from organized modules - VALID_TEST_CASES: ClassVar[list[tuple[str, str, PipelexBundleBlueprint]]] = [ # test_name,plx_content,blueprint + VALID_TEST_CASES: ClassVar[list[tuple[str, str, PipelexBundleBlueprint]]] = [ # test_name,mthds_content,blueprint # Domain tests *DOMAIN_TEST_CASES, # Concept tests diff --git a/tests/unit/pipelex/core/test_data/pipes/controllers/parallel/pipe_parallel.py b/tests/unit/pipelex/core/test_data/pipes/controllers/parallel/pipe_parallel.py index 3ab345bcd..3c880382b 100644 --- a/tests/unit/pipelex/core/test_data/pipes/controllers/parallel/pipe_parallel.py +++ b/tests/unit/pipelex/core/test_data/pipes/controllers/parallel/pipe_parallel.py @@ -14,7 +14,7 @@ type = "PipeParallel" description = "PipeParallel example in 
PIPE_PARALLEL_TEST_CASES" output = "ProcessedData" -parallels = [ +branches = [ { pipe = "process_a", result = "result_a" }, { pipe = "process_b", result = "result_b" }, ] @@ -29,7 +29,7 @@ type="PipeParallel", description="PipeParallel example in PIPE_PARALLEL_TEST_CASES", output="ProcessedData", - parallels=[ + branches=[ SubPipeBlueprint(pipe="process_a", result="result_a"), SubPipeBlueprint(pipe="process_b", result="result_b"), ], diff --git a/tests/unit/pipelex/core/test_data/pipes/controllers/sequence/pipe_sequence.py b/tests/unit/pipelex/core/test_data/pipes/controllers/sequence/pipe_sequence.py index c56ff265b..5f763b1a6 100644 --- a/tests/unit/pipelex/core/test_data/pipes/controllers/sequence/pipe_sequence.py +++ b/tests/unit/pipelex/core/test_data/pipes/controllers/sequence/pipe_sequence.py @@ -37,7 +37,39 @@ ), ) +PIPE_SEQUENCE_WITH_CROSS_DOMAIN_REF = ( + "pipe_sequence_with_cross_domain_ref", + """domain = "orchestration" +description = "Domain with cross-domain pipe ref in sequence" + +[pipe.orchestrate] +type = "PipeSequence" +description = "Orchestrate with cross-domain pipe" +output = "Text" +steps = [ + { pipe = "scoring.compute_score", result = "score" }, + { pipe = "format_result", result = "final" }, +] +""", + PipelexBundleBlueprint( + domain="orchestration", + description="Domain with cross-domain pipe ref in sequence", + pipe={ + "orchestrate": PipeSequenceBlueprint( + type="PipeSequence", + description="Orchestrate with cross-domain pipe", + output="Text", + steps=[ + SubPipeBlueprint(pipe="scoring.compute_score", result="score"), + SubPipeBlueprint(pipe="format_result", result="final"), + ], + ), + }, + ), +) + # Export all PipeSequence test cases PIPE_SEQUENCE_TEST_CASES = [ PIPE_SEQUENCE, + PIPE_SEQUENCE_WITH_CROSS_DOMAIN_REF, ] diff --git a/tests/unit/pipelex/core/test_qualified_ref.py b/tests/unit/pipelex/core/test_qualified_ref.py new file mode 100644 index 000000000..42f0e7728 --- /dev/null +++ b/tests/unit/pipelex/core/test_qualified_ref.py @@ -0,0 +1,174 @@ +import pytest +from pydantic import ValidationError + +from pipelex.core.qualified_ref import QualifiedRef, QualifiedRefError + + +class TestQualifiedRef: + """Test centralized reference parsing via QualifiedRef.""" + + # ========== parse() ========== + + @pytest.mark.parametrize( + ("raw", "expected_domain", "expected_code"), + [ + ("Text", None, "Text"), + ("compute_score", None, "compute_score"), + ("native.Text", "native", "Text"), + ("scoring.compute_score", "scoring", "compute_score"), + ("legal.contracts.NonCompeteClause", "legal.contracts", "NonCompeteClause"), + ("a.b.c.D", "a.b.c", "D"), + ], + ) + def test_parse_valid(self, raw: str, expected_domain: str | None, expected_code: str): + """Test parse splits correctly on last dot.""" + ref = QualifiedRef.parse(raw) + assert ref.domain_path == expected_domain + assert ref.local_code == expected_code + + @pytest.mark.parametrize( + "raw", + [ + "", + ".extract", + "domain.", + "legal..contracts.X", + "..foo", + "foo..", + ], + ) + def test_parse_invalid(self, raw: str): + """Test parse raises on invalid input.""" + with pytest.raises(QualifiedRefError): + QualifiedRef.parse(raw) + + # ========== parse_concept_ref() ========== + + @pytest.mark.parametrize( + ("raw", "expected_domain", "expected_code"), + [ + ("native.Text", "native", "Text"), + ("legal.contracts.NonCompeteClause", "legal.contracts", "NonCompeteClause"), + ("legal.contracts.shareholder.Agreement", "legal.contracts.shareholder", "Agreement"), + ("myapp.BaseEntity", "myapp", 
"BaseEntity"), + ("a.b.c.D", "a.b.c", "D"), + ], + ) + def test_parse_concept_ref_valid(self, raw: str, expected_domain: str | None, expected_code: str): + """Test parse_concept_ref accepts valid concept references.""" + ref = QualifiedRef.parse_concept_ref(raw) + assert ref.domain_path == expected_domain + assert ref.local_code == expected_code + + @pytest.mark.parametrize( + "raw", + [ + "", + "legal..contracts.X", + ".Text", + "native.text", + "NATIVE.Text", + "my-app.Entity", + ], + ) + def test_parse_concept_ref_invalid(self, raw: str): + """Test parse_concept_ref raises on invalid input.""" + with pytest.raises(QualifiedRefError): + QualifiedRef.parse_concept_ref(raw) + + # ========== parse_pipe_ref() ========== + + @pytest.mark.parametrize( + ("raw", "expected_domain", "expected_code"), + [ + ("scoring.compute_score", "scoring", "compute_score"), + ("legal.contracts.extract_clause", "legal.contracts", "extract_clause"), + ("a.b.c.do_thing", "a.b.c", "do_thing"), + ], + ) + def test_parse_pipe_ref_valid(self, raw: str, expected_domain: str | None, expected_code: str): + """Test parse_pipe_ref accepts valid pipe references.""" + ref = QualifiedRef.parse_pipe_ref(raw) + assert ref.domain_path == expected_domain + assert ref.local_code == expected_code + + @pytest.mark.parametrize( + "raw", + [ + "", + ".extract", + "legal..contracts.x", + "scoring.ComputeScore", + "MY_APP.extract", + ], + ) + def test_parse_pipe_ref_invalid(self, raw: str): + """Test parse_pipe_ref raises on invalid input.""" + with pytest.raises(QualifiedRefError): + QualifiedRef.parse_pipe_ref(raw) + + # ========== full_ref ========== + + def test_full_ref_bare(self): + """Test full_ref for bare references.""" + ref = QualifiedRef(domain_path=None, local_code="Text") + assert ref.full_ref == "Text" + + def test_full_ref_qualified(self): + """Test full_ref for domain-qualified references.""" + ref = QualifiedRef(domain_path="legal.contracts", local_code="NonCompeteClause") + assert ref.full_ref == "legal.contracts.NonCompeteClause" + + # ========== is_qualified ========== + + def test_is_qualified_true(self): + ref = QualifiedRef(domain_path="scoring", local_code="compute_score") + assert ref.is_qualified is True + + def test_is_qualified_false(self): + ref = QualifiedRef(domain_path=None, local_code="compute_score") + assert ref.is_qualified is False + + # ========== from_domain_and_code() ========== + + def test_from_domain_and_code(self): + ref = QualifiedRef.from_domain_and_code(domain_path="legal.contracts", local_code="NonCompeteClause") + assert ref.domain_path == "legal.contracts" + assert ref.local_code == "NonCompeteClause" + assert ref.full_ref == "legal.contracts.NonCompeteClause" + + # ========== is_local_to() / is_external_to() ========== + + def test_is_local_to_same_domain(self): + ref = QualifiedRef(domain_path="scoring", local_code="compute_score") + assert ref.is_local_to("scoring") is True + + def test_is_local_to_bare_ref(self): + """Bare refs are always local.""" + ref = QualifiedRef(domain_path=None, local_code="compute_score") + assert ref.is_local_to("scoring") is True + + def test_is_local_to_different_domain(self): + ref = QualifiedRef(domain_path="scoring", local_code="compute_score") + assert ref.is_local_to("orchestration") is False + + def test_is_external_to_different_domain(self): + ref = QualifiedRef(domain_path="scoring", local_code="compute_score") + assert ref.is_external_to("orchestration") is True + + def test_is_external_to_same_domain(self): + ref = 
QualifiedRef(domain_path="scoring", local_code="compute_score") + assert ref.is_external_to("scoring") is False + + def test_is_external_to_bare_ref(self): + """Bare refs are never external.""" + ref = QualifiedRef(domain_path=None, local_code="compute_score") + assert ref.is_external_to("scoring") is False + + # ========== Frozen model ========== + + def test_frozen_model(self): + """Test that QualifiedRef instances are immutable.""" + ref = QualifiedRef(domain_path="scoring", local_code="compute_score") + with pytest.raises(ValidationError, match="frozen"): + ref.local_code = "other" # type: ignore[misc] diff --git a/tests/unit/pipelex/graph/test_dashed_edge_rendering.py b/tests/unit/pipelex/graph/test_dashed_edge_rendering.py new file mode 100644 index 000000000..eccf4f7a8 --- /dev/null +++ b/tests/unit/pipelex/graph/test_dashed_edge_rendering.py @@ -0,0 +1,279 @@ +import re +from datetime import datetime, timezone +from typing import Any, ClassVar + +import pytest + +from pipelex.graph.graphspec import ( + EdgeKind, + EdgeSpec, + GraphSpec, + IOSpec, + NodeIOSpec, + NodeKind, + NodeSpec, + NodeStatus, + PipelineRef, +) +from pipelex.graph.mermaidflow.mermaidflow_factory import MermaidflowFactory + +from .conftest import make_graph_config + + +class TestDashedEdgeRendering: + """Tests for dashed-edge rendering logic across BATCH_ITEM, BATCH_AGGREGATE, and PARALLEL_COMBINE edge kinds.""" + + GRAPH_ID: ClassVar[str] = "dashed_edge_test:001" + CREATED_AT: ClassVar[datetime] = datetime(2024, 1, 15, 10, 30, 0, tzinfo=timezone.utc) + + def _make_graph( + self, + nodes: list[dict[str, Any]], + edges: list[dict[str, Any]] | None = None, + ) -> GraphSpec: + """Helper to create a GraphSpec with nodes and edges.""" + node_specs: list[NodeSpec] = [] + for node_dict in nodes: + node_specs.append(NodeSpec(**node_dict)) + + edge_specs: list[EdgeSpec] = [] + if edges: + for edge_dict in edges: + edge_specs.append(EdgeSpec(**edge_dict)) + + return GraphSpec( + graph_id=self.GRAPH_ID, + created_at=self.CREATED_AT, + pipeline_ref=PipelineRef(), + nodes=node_specs, + edges=edge_specs, + ) + + def _extract_dashed_edges(self, mermaid_code: str) -> list[str]: + """Extract all dashed-edge lines from mermaid code. + + Returns: + Lines containing dashed arrows (-.-> or -."label".->). + """ + return [line.strip() for line in mermaid_code.split("\n") if ".->" in line] + + def _build_controller_graph_with_dashed_edge( + self, + edge_kind: EdgeKind, + edge_label: str | None = None, + ) -> GraphSpec: + """Build a graph with a controller, two children, and a dashed edge between their stuffs. + + The controller contains two child pipes. The dashed edge connects + source_stuff from child_a to target_stuff owned by the controller (for aggregate/combine) + or child_b (for batch_item). + + Args: + edge_kind: The kind of dashed edge to create. + edge_label: Optional label for the dashed edge. + + Returns: + A GraphSpec with the dashed-edge scenario. 
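+ + Topology sketch (as built below): + ctrl_1 --CONTAINS--> child_a + ctrl_1 --CONTAINS--> child_b + child_a --dashed edge of the requested kind--> ctrl_1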
+ """ + controller = { + "node_id": "ctrl_1", + "kind": NodeKind.CONTROLLER, + "pipe_code": "batch_ctrl", + "status": NodeStatus.SUCCEEDED, + "node_io": NodeIOSpec( + inputs=[], + outputs=[IOSpec(name="ctrl_output", concept="OutputList", digest="ctrl_out_digest")], + ), + } + child_a = { + "node_id": "child_a", + "kind": NodeKind.OPERATOR, + "pipe_code": "pipe_a", + "status": NodeStatus.SUCCEEDED, + "node_io": NodeIOSpec( + inputs=[], + outputs=[IOSpec(name="source_stuff", concept="Text", digest="source_digest")], + ), + } + child_b = { + "node_id": "child_b", + "kind": NodeKind.OPERATOR, + "pipe_code": "pipe_b", + "status": NodeStatus.SUCCEEDED, + "node_io": NodeIOSpec( + inputs=[IOSpec(name="target_stuff", concept="Text", digest="target_digest")], + outputs=[], + ), + } + contains_a = { + "edge_id": "edge_contains_a", + "source": "ctrl_1", + "target": "child_a", + "kind": EdgeKind.CONTAINS, + } + contains_b = { + "edge_id": "edge_contains_b", + "source": "ctrl_1", + "target": "child_b", + "kind": EdgeKind.CONTAINS, + } + + # For BATCH_AGGREGATE and PARALLEL_COMBINE, target is the controller's output stuff + # For BATCH_ITEM, target is child_b's input stuff + target_stuff_digest: str + match edge_kind: + case EdgeKind.BATCH_ITEM: + target_stuff_digest = "target_digest" + case EdgeKind.BATCH_AGGREGATE | EdgeKind.PARALLEL_COMBINE: + target_stuff_digest = "ctrl_out_digest" + case EdgeKind.CONTROL | EdgeKind.DATA | EdgeKind.CONTAINS | EdgeKind.SELECTED_OUTCOME: + msg = f"Unexpected edge kind for dashed edge test: {edge_kind}" + raise ValueError(msg) + + dashed_edge: dict[str, Any] = { + "edge_id": "edge_dashed", + "source": "child_a", + "target": "ctrl_1", + "kind": edge_kind, + "source_stuff_digest": "source_digest", + "target_stuff_digest": target_stuff_digest, + } + if edge_label: + dashed_edge["label"] = edge_label + + return self._make_graph( + nodes=[controller, child_a, child_b], + edges=[contains_a, contains_b, dashed_edge], + ) + + @pytest.mark.parametrize( + ("topic", "edge_kind"), + [ + ("BATCH_ITEM", EdgeKind.BATCH_ITEM), + ("BATCH_AGGREGATE", EdgeKind.BATCH_AGGREGATE), + ("PARALLEL_COMBINE", EdgeKind.PARALLEL_COMBINE), + ], + ) + def test_dashed_edge_rendered_for_each_kind(self, topic: str, edge_kind: EdgeKind) -> None: + """Verify that each dashed-edge kind produces at least one dashed arrow.""" + graph = self._build_controller_graph_with_dashed_edge(edge_kind=edge_kind) + graph_config = make_graph_config() + result = MermaidflowFactory.make_from_graphspec(graph, graph_config) + + dashed_lines = self._extract_dashed_edges(result.mermaid_code) + assert len(dashed_lines) >= 1, f"Expected at least one dashed edge for {topic}, got none" + + @pytest.mark.parametrize( + ("topic", "edge_kind"), + [ + ("BATCH_ITEM", EdgeKind.BATCH_ITEM), + ("BATCH_AGGREGATE", EdgeKind.BATCH_AGGREGATE), + ("PARALLEL_COMBINE", EdgeKind.PARALLEL_COMBINE), + ], + ) + def test_dashed_edge_with_label(self, topic: str, edge_kind: EdgeKind) -> None: + """Verify that labeled dashed edges include the label in the mermaid syntax.""" + graph = self._build_controller_graph_with_dashed_edge(edge_kind=edge_kind, edge_label="my_label") + graph_config = make_graph_config() + result = MermaidflowFactory.make_from_graphspec(graph, graph_config) + + dashed_lines = self._extract_dashed_edges(result.mermaid_code) + labeled = [line for line in dashed_lines if "my_label" in line] + assert len(labeled) >= 1, f"Expected a labeled dashed edge for {topic}, got: {dashed_lines}" + + @pytest.mark.parametrize( + ("topic", 
"edge_kind"), + [ + ("BATCH_ITEM", EdgeKind.BATCH_ITEM), + ("BATCH_AGGREGATE", EdgeKind.BATCH_AGGREGATE), + ("PARALLEL_COMBINE", EdgeKind.PARALLEL_COMBINE), + ], + ) + def test_dashed_edge_without_label(self, topic: str, edge_kind: EdgeKind) -> None: + """Verify that unlabeled dashed edges use plain dashed arrow syntax.""" + graph = self._build_controller_graph_with_dashed_edge(edge_kind=edge_kind) + graph_config = make_graph_config() + result = MermaidflowFactory.make_from_graphspec(graph, graph_config) + + dashed_lines = self._extract_dashed_edges(result.mermaid_code) + # Unlabeled edges use `-.->` without a label string + plain_dashed = [line for line in dashed_lines if ".->" in line and '-."' not in line] + assert len(plain_dashed) >= 1, f"Expected a plain dashed edge for {topic}, got: {dashed_lines}" + + def test_all_edge_kinds_use_same_dashed_syntax(self) -> None: + """Verify that all three dashed-edge kinds produce structurally identical dashed arrow syntax. + + This test catches divergence if one copy of the logic is modified but not the others. + """ + results_by_kind: dict[str, list[str]] = {} + for edge_kind in (EdgeKind.BATCH_ITEM, EdgeKind.BATCH_AGGREGATE, EdgeKind.PARALLEL_COMBINE): + graph = self._build_controller_graph_with_dashed_edge(edge_kind=edge_kind, edge_label="test_label") + graph_config = make_graph_config() + result = MermaidflowFactory.make_from_graphspec(graph, graph_config) + + dashed_lines = self._extract_dashed_edges(result.mermaid_code) + # Extract just the arrow operator from each line (e.g., `-."test_label".->` or `-.->`) + # by replacing stuff IDs (s_XXX) with a placeholder + normalized = [re.sub(r"s_[a-f0-9]+", "ID", line) for line in dashed_lines] + results_by_kind[edge_kind] = normalized + + # All three should produce the same normalized patterns + kinds = list(results_by_kind.keys()) + for index_kind in range(1, len(kinds)): + assert results_by_kind[kinds[0]] == results_by_kind[kinds[index_kind]], ( + f"Dashed edge syntax differs between {kinds[0]} and {kinds[index_kind]}: " + f"{results_by_kind[kinds[0]]} vs {results_by_kind[kinds[index_kind]]}" + ) + + def test_missing_stuff_resolved_on_the_fly(self) -> None: + """Verify that stuff nodes not in the normal stuff_registry get rendered on-the-fly for dashed edges. + + Creates a scenario where the target stuff only exists on the controller's output + (not registered through normal pipe IOSpec), so it must be resolved from all_stuff_info. 
+ """ + controller = { + "node_id": "ctrl_1", + "kind": NodeKind.CONTROLLER, + "pipe_code": "batch_ctrl", + "status": NodeStatus.SUCCEEDED, + "node_io": NodeIOSpec( + inputs=[], + outputs=[IOSpec(name="aggregated_output", concept="OutputList", digest="agg_digest")], + ), + } + child = { + "node_id": "child_1", + "kind": NodeKind.OPERATOR, + "pipe_code": "child_pipe", + "status": NodeStatus.SUCCEEDED, + "node_io": NodeIOSpec( + inputs=[], + outputs=[IOSpec(name="item_output", concept="Text", digest="item_digest")], + ), + } + contains = { + "edge_id": "edge_contains", + "source": "ctrl_1", + "target": "child_1", + "kind": EdgeKind.CONTAINS, + } + aggregate_edge = { + "edge_id": "edge_agg", + "source": "child_1", + "target": "ctrl_1", + "kind": EdgeKind.BATCH_AGGREGATE, + "source_stuff_digest": "item_digest", + "target_stuff_digest": "agg_digest", + } + graph = self._make_graph( + nodes=[controller, child], + edges=[contains, aggregate_edge], + ) + graph_config = make_graph_config() + result = MermaidflowFactory.make_from_graphspec(graph, graph_config) + + # The aggregated_output stuff should be rendered (resolved on the fly) + assert "aggregated_output" in result.mermaid_code + # And there should be a dashed edge connecting them + dashed_lines = self._extract_dashed_edges(result.mermaid_code) + assert len(dashed_lines) >= 1, "Expected a dashed edge for aggregate, got none" diff --git a/tests/unit/pipelex/graph/test_graph_tracer.py b/tests/unit/pipelex/graph/test_graph_tracer.py index 50bdeb7eb..82fb0b975 100644 --- a/tests/unit/pipelex/graph/test_graph_tracer.py +++ b/tests/unit/pipelex/graph/test_graph_tracer.py @@ -872,3 +872,258 @@ def test_batch_aggregate_edges_contain_stuff_digests(self) -> None: edge = batch_aggregate_edges[0] assert edge.source_stuff_digest == "item_result_digest" assert edge.target_stuff_digest == "output_list_digest" + + def test_register_controller_output(self) -> None: + """Test that register_controller_output adds to output_specs and _stuff_producer_map. + + When a controller explicitly registers outputs, DATA edges should go from + the controller node to consumers of those outputs. 
+ """ + tracer = GraphTracer() + context = tracer.setup(graph_id="controller-output-test", data_inclusion=make_defaulted_data_inclusion_config()) + + started_at = datetime.now(timezone.utc) + + # Controller node (e.g., PipeParallel) + controller_id, ctrl_ctx = tracer.on_pipe_start( + graph_context=context, + pipe_code="my_parallel", + pipe_type="PipeParallel", + node_kind=NodeKind.CONTROLLER, + started_at=started_at, + input_specs=[IOSpec(name="input_text", concept="Text", digest="input_digest")], + ) + + # Branch 1: produces output with digest "branch_output_1" + branch1_id, _ = tracer.on_pipe_start( + graph_context=ctrl_ctx, + pipe_code="branch_pipe_1", + pipe_type="PipeLLM", + node_kind=NodeKind.OPERATOR, + started_at=started_at + timedelta(milliseconds=10), + input_specs=[IOSpec(name="input_text", concept="Text", digest="input_digest")], + ) + tracer.on_pipe_end_success( + node_id=branch1_id, + ended_at=started_at + timedelta(milliseconds=50), + output_spec=IOSpec(name="short_summary", concept="Text", digest="branch_output_1"), + ) + + # Branch 2: produces output with digest "branch_output_2" + branch2_id, _ = tracer.on_pipe_start( + graph_context=ctrl_ctx, + pipe_code="branch_pipe_2", + pipe_type="PipeLLM", + node_kind=NodeKind.OPERATOR, + started_at=started_at + timedelta(milliseconds=10), + input_specs=[IOSpec(name="input_text", concept="Text", digest="input_digest")], + ) + tracer.on_pipe_end_success( + node_id=branch2_id, + ended_at=started_at + timedelta(milliseconds=50), + output_spec=IOSpec(name="long_summary", concept="Text", digest="branch_output_2"), + ) + + # Controller registers branch outputs (overriding sub-pipe registrations) + tracer.register_controller_output( + node_id=controller_id, + output_spec=IOSpec(name="short_summary", concept="Text", digest="branch_output_1"), + ) + tracer.register_controller_output( + node_id=controller_id, + output_spec=IOSpec(name="long_summary", concept="Text", digest="branch_output_2"), + ) + + # Consumer pipe that uses branch_output_1 + consumer_id, _ = tracer.on_pipe_start( + graph_context=context, + pipe_code="consumer_pipe", + pipe_type="PipeLLM", + node_kind=NodeKind.OPERATOR, + started_at=started_at + timedelta(milliseconds=60), + input_specs=[IOSpec(name="summary", concept="Text", digest="branch_output_1")], + ) + tracer.on_pipe_end_success( + node_id=consumer_id, + ended_at=started_at + timedelta(milliseconds=100), + ) + + # End controller + tracer.on_pipe_end_success( + node_id=controller_id, + ended_at=started_at + timedelta(milliseconds=110), + ) + + graph_spec = tracer.teardown() + + assert graph_spec is not None + + # Verify controller node has 2 output specs + controller_node = next(node for node in graph_spec.nodes if node.node_id == controller_id) + assert len(controller_node.node_io.outputs) == 2 + output_names = {output.name for output in controller_node.node_io.outputs} + assert output_names == {"short_summary", "long_summary"} + + # Verify DATA edge goes from controller (not branch) to consumer + data_edges = [edge for edge in graph_spec.edges if edge.kind.is_data] + controller_to_consumer = [edge for edge in data_edges if edge.target == consumer_id] + assert len(controller_to_consumer) == 1 + assert controller_to_consumer[0].source == controller_id + + def test_passthrough_output_skipped(self) -> None: + """Test that on_pipe_end_success skips output registration when output matches an input. 
+ + When a controller's main_stuff is unchanged from one of its inputs (pass-through), + the output should not be registered to avoid corrupting data edges. + """ + tracer = GraphTracer() + context = tracer.setup(graph_id="passthrough-test", data_inclusion=make_defaulted_data_inclusion_config()) + + started_at = datetime.now(timezone.utc) + + # Producer pipe creates stuff with digest "original_stuff" + producer_id, _ = tracer.on_pipe_start( + graph_context=context, + pipe_code="producer", + pipe_type="PipeLLM", + node_kind=NodeKind.OPERATOR, + started_at=started_at, + ) + tracer.on_pipe_end_success( + node_id=producer_id, + ended_at=started_at + timedelta(milliseconds=50), + output_spec=IOSpec(name="output", concept="Text", digest="original_stuff"), + ) + + # Controller consumes "original_stuff" and its main_stuff is the same + controller_id, _ctrl_ctx = tracer.on_pipe_start( + graph_context=context, + pipe_code="my_parallel", + pipe_type="PipeParallel", + node_kind=NodeKind.CONTROLLER, + started_at=started_at + timedelta(milliseconds=60), + input_specs=[IOSpec(name="input_text", concept="Text", digest="original_stuff")], + ) + + # Controller ends with the same digest as its input (pass-through) + tracer.on_pipe_end_success( + node_id=controller_id, + ended_at=started_at + timedelta(milliseconds=100), + output_spec=IOSpec(name="input_text", concept="Text", digest="original_stuff"), + ) + + # Consumer should still get the edge from the original producer, not the controller + consumer_id, _ = tracer.on_pipe_start( + graph_context=context, + pipe_code="consumer", + pipe_type="PipeLLM", + node_kind=NodeKind.OPERATOR, + started_at=started_at + timedelta(milliseconds=110), + input_specs=[IOSpec(name="input", concept="Text", digest="original_stuff")], + ) + tracer.on_pipe_end_success( + node_id=consumer_id, + ended_at=started_at + timedelta(milliseconds=150), + ) + + graph_spec = tracer.teardown() + + assert graph_spec is not None + + # Controller should have NO outputs (pass-through was skipped) + controller_node = next(node for node in graph_spec.nodes if node.node_id == controller_id) + assert len(controller_node.node_io.outputs) == 0 + + # DATA edges should go from producer to both controller (as input) and consumer + # The controller does NOT steal the producer registration (pass-through skipped) + data_edges = [edge for edge in graph_spec.edges if edge.kind.is_data] + assert len(data_edges) == 2 + assert all(edge.source == producer_id for edge in data_edges) + targets = {edge.target for edge in data_edges} + assert targets == {controller_id, consumer_id} + + def test_multiple_output_specs(self) -> None: + """Test that a node can have multiple outputs via register_controller_output. + + All registered outputs should produce correct DATA edges to their consumers. 
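+ + Expected DATA edges (asserted below): + multi_output_pipe --digest_a--> consumer_a + multi_output_pipe --digest_b--> consumer_b + multi_output_pipe --digest_c--> consumer_c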
+ """ + tracer = GraphTracer() + context = tracer.setup(graph_id="multi-output-test", data_inclusion=make_defaulted_data_inclusion_config()) + + started_at = datetime.now(timezone.utc) + + # Controller with multiple outputs + controller_id, _ = tracer.on_pipe_start( + graph_context=context, + pipe_code="multi_output_pipe", + pipe_type="PipeParallel", + node_kind=NodeKind.CONTROLLER, + started_at=started_at, + ) + + # Register three different outputs + tracer.register_controller_output( + node_id=controller_id, + output_spec=IOSpec(name="output_a", concept="Text", digest="digest_a"), + ) + tracer.register_controller_output( + node_id=controller_id, + output_spec=IOSpec(name="output_b", concept="Text", digest="digest_b"), + ) + tracer.register_controller_output( + node_id=controller_id, + output_spec=IOSpec(name="output_c", concept="Text", digest="digest_c"), + ) + + tracer.on_pipe_end_success( + node_id=controller_id, + ended_at=started_at + timedelta(milliseconds=100), + ) + + # Consumer A reads digest_a + consumer_a_id, _ = tracer.on_pipe_start( + graph_context=context, + pipe_code="consumer_a", + pipe_type="PipeLLM", + node_kind=NodeKind.OPERATOR, + started_at=started_at + timedelta(milliseconds=110), + input_specs=[IOSpec(name="input", concept="Text", digest="digest_a")], + ) + tracer.on_pipe_end_success(node_id=consumer_a_id, ended_at=started_at + timedelta(milliseconds=120)) + + # Consumer B reads digest_b + consumer_b_id, _ = tracer.on_pipe_start( + graph_context=context, + pipe_code="consumer_b", + pipe_type="PipeLLM", + node_kind=NodeKind.OPERATOR, + started_at=started_at + timedelta(milliseconds=130), + input_specs=[IOSpec(name="input", concept="Text", digest="digest_b")], + ) + tracer.on_pipe_end_success(node_id=consumer_b_id, ended_at=started_at + timedelta(milliseconds=140)) + + # Consumer C reads digest_c + consumer_c_id, _ = tracer.on_pipe_start( + graph_context=context, + pipe_code="consumer_c", + pipe_type="PipeLLM", + node_kind=NodeKind.OPERATOR, + started_at=started_at + timedelta(milliseconds=150), + input_specs=[IOSpec(name="input", concept="Text", digest="digest_c")], + ) + tracer.on_pipe_end_success(node_id=consumer_c_id, ended_at=started_at + timedelta(milliseconds=160)) + + graph_spec = tracer.teardown() + + assert graph_spec is not None + + # Controller should have 3 output specs + controller_node = next(node for node in graph_spec.nodes if node.node_id == controller_id) + assert len(controller_node.node_io.outputs) == 3 + + # 3 DATA edges: controller -> consumer_a, controller -> consumer_b, controller -> consumer_c + data_edges = [edge for edge in graph_spec.edges if edge.kind.is_data] + assert len(data_edges) == 3 + assert all(edge.source == controller_id for edge in data_edges) + targets = {edge.target for edge in data_edges} + assert targets == {consumer_a_id, consumer_b_id, consumer_c_id} diff --git a/tests/unit/pipelex/graph/test_mermaidflow.py b/tests/unit/pipelex/graph/test_mermaidflow.py index 599b4d693..c875dcf39 100644 --- a/tests/unit/pipelex/graph/test_mermaidflow.py +++ b/tests/unit/pipelex/graph/test_mermaidflow.py @@ -386,3 +386,89 @@ def test_subgraph_depth_coloring(self) -> None: # Should have multiple subgraphs with different colors assert "subgraph" in result.mermaid_code assert "style sg_" in result.mermaid_code # Subgraph styling + + def test_parallel_combine_stuff_rendered_inside_controller_subgraph(self) -> None: + """Test that PARALLEL_COMBINE target stuffs are rendered inside the controller's subgraph.""" + parallel_ctrl = { + "node_id": 
"parallel_ctrl", + "kind": NodeKind.CONTROLLER, + "pipe_code": "parallel_controller", + "status": NodeStatus.SUCCEEDED, + "node_io": NodeIOSpec( + inputs=[], + outputs=[IOSpec(name="combined_output", concept="MergedText", digest="combined_digest_001")], + ), + } + branch_a = { + "node_id": "branch_a", + "kind": NodeKind.OPERATOR, + "pipe_code": "branch_a_pipe", + "status": NodeStatus.SUCCEEDED, + "node_io": NodeIOSpec( + inputs=[], + outputs=[IOSpec(name="branch_a_out", concept="Text", digest="branch_a_digest")], + ), + } + branch_b = { + "node_id": "branch_b", + "kind": NodeKind.OPERATOR, + "pipe_code": "branch_b_pipe", + "status": NodeStatus.SUCCEEDED, + "node_io": NodeIOSpec( + inputs=[], + outputs=[IOSpec(name="branch_b_out", concept="Text", digest="branch_b_digest")], + ), + } + contains_a = { + "edge_id": "edge_contains_a", + "source": "parallel_ctrl", + "target": "branch_a", + "kind": EdgeKind.CONTAINS, + } + contains_b = { + "edge_id": "edge_contains_b", + "source": "parallel_ctrl", + "target": "branch_b", + "kind": EdgeKind.CONTAINS, + } + combine_a = { + "edge_id": "edge_combine_a", + "source": "branch_a", + "target": "parallel_ctrl", + "kind": EdgeKind.PARALLEL_COMBINE, + "source_stuff_digest": "branch_a_digest", + "target_stuff_digest": "combined_digest_001", + } + combine_b = { + "edge_id": "edge_combine_b", + "source": "branch_b", + "target": "parallel_ctrl", + "kind": EdgeKind.PARALLEL_COMBINE, + "source_stuff_digest": "branch_b_digest", + "target_stuff_digest": "combined_digest_001", + } + graph = self._make_graph( + nodes=[parallel_ctrl, branch_a, branch_b], + edges=[contains_a, contains_b, combine_a, combine_b], + ) + graph_config = make_graph_config() + result = MermaidflowFactory.make_from_graphspec(graph, graph_config) + + # The combined output stuff should appear inside the controller's subgraph + # (between subgraph ... 
and end) + lines = result.mermaid_code.split("\n") + subgraph_start_idx = None + subgraph_end_idx = None + for index_line, line in enumerate(lines): + if "subgraph" in line and "parallel_controller" in line: + subgraph_start_idx = index_line + if subgraph_start_idx is not None and subgraph_end_idx is None and line.strip() == "end": + subgraph_end_idx = index_line + break + + assert subgraph_start_idx is not None, "Controller subgraph not found" + assert subgraph_end_idx is not None, "Controller subgraph end not found" + + subgraph_content = "\n".join(lines[subgraph_start_idx : subgraph_end_idx + 1]) + assert "combined_output" in subgraph_content, "Combined output stuff should be inside the controller subgraph" + assert ":::stuff" in subgraph_content, "Combined output stuff should have :::stuff class styling" diff --git a/tests/unit/pipelex/language/test_plx_factory.py b/tests/unit/pipelex/language/test_mthds_factory.py similarity index 65% rename from tests/unit/pipelex/language/test_plx_factory.py rename to tests/unit/pipelex/language/test_mthds_factory.py index fcdac3eda..7988c13f2 100644 --- a/tests/unit/pipelex/language/test_plx_factory.py +++ b/tests/unit/pipelex/language/test_mthds_factory.py @@ -5,32 +5,32 @@ from pytest_mock import MockerFixture from pipelex.core.bundles.pipelex_bundle_blueprint import PipelexBundleBlueprint -from pipelex.language.plx_config import PlxConfig, PlxConfigForConcepts, PlxConfigForPipes, PlxConfigInlineTables, PlxConfigStrings -from pipelex.language.plx_factory import PIPE_CATEGORY_FIELD_KEY, PlxFactory +from pipelex.language.mthds_config import MthdsConfig, MthdsConfigForConcepts, MthdsConfigForPipes, MthdsConfigInlineTables, MthdsConfigStrings +from pipelex.language.mthds_factory import PIPE_CATEGORY_FIELD_KEY, MthdsFactory from pipelex.pipe_operators.compose.pipe_compose_blueprint import PipeComposeBlueprint -class TestPlxFactoryUnit: - """Unit tests for PlxFactory methods.""" +class TestMthdsFactoryUnit: + """Unit tests for MthdsFactory methods.""" @pytest.fixture - def mock_plx_config(self) -> PlxConfig: - """Create a mock PLX configuration for testing.""" - return PlxConfig( - strings=PlxConfigStrings( + def mock_mthds_config(self) -> MthdsConfig: + """Create a mock MTHDS configuration for testing.""" + return MthdsConfig( + strings=MthdsConfigStrings( prefer_literal=True, force_multiline=False, length_limit_to_multiline=50, ensure_trailing_newline=True, ensure_leading_blank_line=False, ), - inline_tables=PlxConfigInlineTables( + inline_tables=MthdsConfigInlineTables( spaces_inside_curly_braces=True, ), - concepts=PlxConfigForConcepts( + concepts=MthdsConfigForConcepts( structure_field_ordering=["type", "description", "inputs", "output"], ), - pipes=PlxConfigForPipes( + pipes=MthdsConfigForPipes( field_ordering=["type", "description", "inputs", "output"], ), ) @@ -51,85 +51,85 @@ def sample_mapping_data(self) -> dict[str, Any]: ], } - def test_format_tomlkit_string_simple(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_format_tomlkit_string_simple(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test formatting simple strings.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) # Test simple string - result = PlxFactory.format_tomlkit_string("simple text") + result = MthdsFactory.format_tomlkit_string("simple text") assert isinstance(result, tomlkit.items.String) # 
pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] # The actual string value without quotes assert result.value == "simple text" - def test_format_tomlkit_string_multiline(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_format_tomlkit_string_multiline(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test formatting multiline strings.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) # Test string with newlines multiline_text = "line1\nline2\nline3" - result = PlxFactory.format_tomlkit_string(multiline_text) + result = MthdsFactory.format_tomlkit_string(multiline_text) assert isinstance(result, tomlkit.items.String) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] # Should be multiline with trailing newline assert result.value == "line1\nline2\nline3\n" # Check if it's a multiline string by checking if it has newlines in the value assert "\n" in result.value - def test_format_tomlkit_string_force_multiline(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_format_tomlkit_string_force_multiline(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test force multiline configuration.""" - mock_plx_config.strings.force_multiline = True - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + mock_mthds_config.strings.force_multiline = True + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) - result = PlxFactory.format_tomlkit_string("short") + result = MthdsFactory.format_tomlkit_string("short") assert isinstance(result, tomlkit.items.String) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] # Should be multiline even for short text assert result.value == "short\n" # Check if it's a multiline string by checking if it has newlines in the value assert "\n" in result.value - def test_format_tomlkit_string_length_limit(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_format_tomlkit_string_length_limit(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test length limit for multiline conversion.""" - mock_plx_config.strings.length_limit_to_multiline = 10 - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + mock_mthds_config.strings.length_limit_to_multiline = 10 + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) long_text = "this is a very long text that exceeds the limit" - result = PlxFactory.format_tomlkit_string(long_text) + result = MthdsFactory.format_tomlkit_string(long_text) assert isinstance(result, tomlkit.items.String) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] # Should be multiline due to length assert result.value == "this is a very long text that exceeds the limit\n" # Check if it's a multiline string by checking if it has newlines in the value assert "\n" in result.value - def test_format_tomlkit_string_leading_blank_line(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_format_tomlkit_string_leading_blank_line(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test leading blank line configuration.""" - mock_plx_config.strings.ensure_leading_blank_line = True - mock_plx_config.strings.force_multiline = True - _mock_config = 
mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + mock_mthds_config.strings.ensure_leading_blank_line = True + mock_mthds_config.strings.force_multiline = True + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) - result = PlxFactory.format_tomlkit_string("content") + result = MthdsFactory.format_tomlkit_string("content") assert isinstance(result, tomlkit.items.String) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] # Should have leading blank line assert result.value == "\ncontent\n" # Check if it's a multiline string by checking if it has newlines in the value assert "\n" in result.value - def test_convert_dicts_to_inline_tables_simple_dict(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_convert_dicts_to_inline_tables_simple_dict(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test converting simple dictionary to inline table.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) input_dict = {"key1": "value1", "key2": "value2"} - result = PlxFactory.convert_dicts_to_inline_tables(input_dict) + result = MthdsFactory.convert_dicts_to_inline_tables(input_dict) assert isinstance(result, tomlkit.items.InlineTable) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] assert result["key1"].value == "value1" assert result["key2"].value == "value2" - def test_convert_dicts_to_inline_tables_with_field_ordering(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_convert_dicts_to_inline_tables_with_field_ordering(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test converting dictionary with field ordering preserves all fields.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) input_dict = {"key2": "value2", "key1": "value1", "key3": "value3"} field_ordering = ["key1", "key3"] - result = PlxFactory.convert_dicts_to_inline_tables(input_dict, field_ordering) + result = MthdsFactory.convert_dicts_to_inline_tables(input_dict, field_ordering) assert isinstance(result, tomlkit.items.InlineTable) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] # All input keys must be present in the result @@ -173,15 +173,15 @@ def test_convert_dicts_to_inline_tables_with_field_ordering(self, mocker: Mocker def test_convert_dicts_to_inline_tables_with_field_ordering_preserves_all_fields( self, mocker: MockerFixture, - mock_plx_config: PlxConfig, + mock_mthds_config: MthdsConfig, topic: str, input_dict: dict[str, Any], field_ordering: list[str], ): """Test that all input fields are preserved in the output regardless of field_ordering.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) - result = PlxFactory.convert_dicts_to_inline_tables(input_dict, field_ordering or None) + result = MthdsFactory.convert_dicts_to_inline_tables(input_dict, field_ordering or None) assert isinstance(result, tomlkit.items.InlineTable) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] result_keys = set(result.keys()) @@ -195,23 +195,23 @@ def 
test_convert_dicts_to_inline_tables_with_field_ordering_preserves_all_fields else: assert result_value == expected_value, f"[{topic}] Value mismatch for key '{key}'" - def test_convert_dicts_to_inline_tables_nested_dict(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_convert_dicts_to_inline_tables_nested_dict(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test converting nested dictionary.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) input_dict = {"outer": {"inner": "value"}} - result = PlxFactory.convert_dicts_to_inline_tables(input_dict) + result = MthdsFactory.convert_dicts_to_inline_tables(input_dict) assert isinstance(result, tomlkit.items.InlineTable) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] assert isinstance(result["outer"], tomlkit.items.InlineTable) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] assert result["outer"]["inner"].value == "value" - def test_convert_dicts_to_inline_tables_list_with_dicts(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_convert_dicts_to_inline_tables_list_with_dicts(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test converting list containing dictionaries.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) input_list = [{"name": "first", "value": 1}, {"name": "second", "value": 2}] - result = PlxFactory.convert_dicts_to_inline_tables(input_list) + result = MthdsFactory.convert_dicts_to_inline_tables(input_list) assert isinstance(result, tomlkit.items.Array) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] assert len(result) == 2 @@ -219,23 +219,23 @@ def test_convert_dicts_to_inline_tables_list_with_dicts(self, mocker: MockerFixt assert result[0]["name"].value == "first" assert result[0]["value"] == 1 - def test_convert_dicts_to_inline_tables_string_handling(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_convert_dicts_to_inline_tables_string_handling(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test string handling in conversion.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) # Test simple string - result = PlxFactory.convert_dicts_to_inline_tables("simple string") + result = MthdsFactory.convert_dicts_to_inline_tables("simple string") assert isinstance(result, tomlkit.items.String) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] # Test other types pass through - assert PlxFactory.convert_dicts_to_inline_tables(42) == 42 - assert PlxFactory.convert_dicts_to_inline_tables(True) is True + assert MthdsFactory.convert_dicts_to_inline_tables(42) == 42 + assert MthdsFactory.convert_dicts_to_inline_tables(True) is True - def test_convert_mapping_to_table(self, mocker: MockerFixture, mock_plx_config: PlxConfig, sample_mapping_data: dict[str, Any]): + def test_convert_mapping_to_table(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig, sample_mapping_data: dict[str, Any]): """Test converting mapping to table.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", 
return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) - result = PlxFactory.convert_mapping_to_table(sample_mapping_data) + result = MthdsFactory.convert_mapping_to_table(sample_mapping_data) assert isinstance(result, tomlkit.items.Table) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] assert "simple_field" in result @@ -243,14 +243,14 @@ def test_convert_mapping_to_table(self, mocker: MockerFixture, mock_plx_config: assert "list_field" in result assert "complex_list" in result - def test_convert_mapping_to_table_with_field_ordering(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_convert_mapping_to_table_with_field_ordering(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test converting mapping with field ordering.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) mapping = {"field3": "value3", "field1": "value1", "field2": "value2"} field_ordering = ["field1", "field2"] - result = PlxFactory.convert_mapping_to_table(mapping, field_ordering) + result = MthdsFactory.convert_mapping_to_table(mapping, field_ordering) assert isinstance(result, tomlkit.items.Table) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] # Check ordering (note: tomlkit preserves insertion order) @@ -259,12 +259,12 @@ def test_convert_mapping_to_table_with_field_ordering(self, mocker: MockerFixtur assert keys[1] == "field2" assert keys[2] == "field3" - def test_convert_mapping_to_table_skips_category(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_convert_mapping_to_table_skips_category(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test that category field is skipped.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) mapping = {"field1": "value1", PIPE_CATEGORY_FIELD_KEY: "should_be_skipped", "field2": "value2"} - result = PlxFactory.convert_mapping_to_table(mapping) + result = MthdsFactory.convert_mapping_to_table(mapping) assert isinstance(result, tomlkit.items.Table) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] assert "field1" in result @@ -274,31 +274,31 @@ def test_convert_mapping_to_table_skips_category(self, mocker: MockerFixture, mo def test_add_spaces_to_inline_tables_simple(self): """Test adding spaces to simple inline tables.""" input_toml = "{key = value}" - result = PlxFactory.add_spaces_to_inline_tables(input_toml) + result = MthdsFactory.add_spaces_to_inline_tables(input_toml) assert result == "{ key = value }" def test_add_spaces_to_inline_tables_already_spaced(self): """Test that already spaced tables are preserved.""" input_toml = "{ key = value }" - result = PlxFactory.add_spaces_to_inline_tables(input_toml) + result = MthdsFactory.add_spaces_to_inline_tables(input_toml) assert result == "{ key = value }" def test_add_spaces_to_inline_tables_nested(self): """Test adding spaces to nested inline tables.""" input_toml = "{outer = {inner = value}}" - result = PlxFactory.add_spaces_to_inline_tables(input_toml) + result = MthdsFactory.add_spaces_to_inline_tables(input_toml) assert result == "{ outer = { inner = value } }" def test_add_spaces_to_inline_tables_with_jinja2(self): """Test that Jinja2 
templates are preserved.""" input_toml = "template = '{{ variable }}' and {key = value}" - result = PlxFactory.add_spaces_to_inline_tables(input_toml) + result = MthdsFactory.add_spaces_to_inline_tables(input_toml) assert result == "template = '{{ variable }}' and { key = value }" def test_add_spaces_to_inline_tables_complex(self): """Test complex inline table spacing.""" input_toml = "config = {db = {host = 'localhost', port = 5432}, cache = {enabled = true}}" - result = PlxFactory.add_spaces_to_inline_tables(input_toml) + result = MthdsFactory.add_spaces_to_inline_tables(input_toml) expected = "config = { db = { host = 'localhost', port = 5432 }, cache = { enabled = true } }" assert result == expected @@ -306,17 +306,17 @@ def test_add_spaces_to_inline_tables_partial_spacing(self): """Test partial spacing scenarios.""" # Left space only input_toml = "{ key = value}" - result = PlxFactory.add_spaces_to_inline_tables(input_toml) + result = MthdsFactory.add_spaces_to_inline_tables(input_toml) assert result == "{ key = value }" # Right space only input_toml = "{key = value }" - result = PlxFactory.add_spaces_to_inline_tables(input_toml) + result = MthdsFactory.add_spaces_to_inline_tables(input_toml) assert result == "{ key = value }" - def test_make_table_obj_for_pipe(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_make_table_obj_for_pipe(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test making table object for pipe section.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) pipe_data = { "type": "PipeLLM", @@ -326,7 +326,7 @@ def test_make_table_obj_for_pipe(self, mocker: MockerFixture, mock_plx_config: P "nested_config": {"param1": "value1", "param2": 42}, } - result = PlxFactory.make_table_obj_for_pipe(pipe_data) + result = MthdsFactory.make_table_obj_for_pipe(pipe_data) assert isinstance(result, tomlkit.items.Table) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] assert "type" in result @@ -335,25 +335,25 @@ def test_make_table_obj_for_pipe(self, mocker: MockerFixture, mock_plx_config: P assert "output" in result assert "nested_config" in result - def test_make_table_obj_for_concept_simple_string(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_make_table_obj_for_concept_simple_string(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test making table object for concept with simple string definition.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) concept_data = {"SimpleConcept": "A simple concept definition"} - result = PlxFactory.make_table_obj_for_concept(concept_data) + result = MthdsFactory.make_table_obj_for_concept(concept_data) assert isinstance(result, tomlkit.items.Table) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] assert "SimpleConcept" in result assert result["SimpleConcept"] == "A simple concept definition" - def test_make_table_obj_for_concept_with_structure(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_make_table_obj_for_concept_with_structure(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test making table object for concept with structure.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", 
return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) concept_data = {"ComplexConcept": {"description": "A complex concept", "structure": {"field1": "string", "field2": "int"}}} - result = PlxFactory.make_table_obj_for_concept(concept_data) + result = MthdsFactory.make_table_obj_for_concept(concept_data) assert isinstance(result, tomlkit.items.Table) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] assert "ComplexConcept" in result @@ -361,22 +361,22 @@ def test_make_table_obj_for_concept_with_structure(self, mocker: MockerFixture, assert "description" in result["ComplexConcept"] assert "structure" in result["ComplexConcept"] - def test_make_table_obj_for_concept_structure_string(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_make_table_obj_for_concept_structure_string(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test concept with structure as string.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) concept_data = {"ConceptWithStringStructure": {"structure": "SomeClass"}} - result = PlxFactory.make_table_obj_for_concept(concept_data) + result = MthdsFactory.make_table_obj_for_concept(concept_data) assert isinstance(result, tomlkit.items.Table) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] concept_table = result["ConceptWithStringStructure"] assert isinstance(concept_table, tomlkit.items.Table) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] assert concept_table["structure"] == "SomeClass" - def test_make_table_obj_for_concept_invalid_structure(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_make_table_obj_for_concept_invalid_structure(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test error handling for invalid structure types.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) concept_data = { "InvalidConcept": { @@ -385,49 +385,49 @@ def test_make_table_obj_for_concept_invalid_structure(self, mocker: MockerFixtur } with pytest.raises(TypeError, match="Structure field value is not a mapping"): - PlxFactory.make_table_obj_for_concept(concept_data) + MthdsFactory.make_table_obj_for_concept(concept_data) - def test_make_table_obj_for_concept_invalid_concept_value(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_make_table_obj_for_concept_invalid_concept_value(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test error handling for invalid concept value types.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) concept_data = { "InvalidConcept": 123 # Invalid type } with pytest.raises(TypeError, match="Concept field value is not a mapping"): - PlxFactory.make_table_obj_for_concept(concept_data) + MthdsFactory.make_table_obj_for_concept(concept_data) - def test_dict_to_plx_styled_toml_with_spacing(self, mocker: MockerFixture, mock_plx_config: PlxConfig): - """Test dict to PLX styled TOML with spacing enabled.""" - mock_plx_config.inline_tables.spaces_inside_curly_braces = True - _mock_config = 
mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) - mock_add_spaces = mocker.patch.object(PlxFactory, "add_spaces_to_inline_tables", return_value="spaced_output") + def test_dict_to_mthds_styled_toml_with_spacing(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): + """Test dict to MTHDS styled TOML with spacing enabled.""" + mock_mthds_config.inline_tables.spaces_inside_curly_braces = True + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) + mock_add_spaces = mocker.patch.object(MthdsFactory, "add_spaces_to_inline_tables", return_value="spaced_output") data = {"domain": "test", "description": "test domain"} - result = PlxFactory.dict_to_plx_styled_toml(data) + result = MthdsFactory.dict_to_mthds_styled_toml(data) assert result == "spaced_output" mock_add_spaces.assert_called_once() - def test_dict_to_plx_styled_toml_without_spacing(self, mocker: MockerFixture, mock_plx_config: PlxConfig): - """Test dict to PLX styled TOML without spacing.""" - mock_plx_config.inline_tables.spaces_inside_curly_braces = False - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) - mock_add_spaces = mocker.patch.object(PlxFactory, "add_spaces_to_inline_tables") + def test_dict_to_mthds_styled_toml_without_spacing(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): + """Test dict to MTHDS styled TOML without spacing.""" + mock_mthds_config.inline_tables.spaces_inside_curly_braces = False + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) + mock_add_spaces = mocker.patch.object(MthdsFactory, "add_spaces_to_inline_tables") data = {"domain": "test", "description": "test domain"} - result = PlxFactory.dict_to_plx_styled_toml(data) + result = MthdsFactory.dict_to_mthds_styled_toml(data) # Should not call add_spaces_to_inline_tables mock_add_spaces.assert_not_called() assert isinstance(result, str) - def test_dict_to_plx_styled_toml_empty_sections(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_dict_to_mthds_styled_toml_empty_sections(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test handling of empty sections.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) data: dict[str, Any] = { "domain": "test", @@ -435,41 +435,41 @@ def test_dict_to_plx_styled_toml_empty_sections(self, mocker: MockerFixture, moc "pipe": {}, # Empty pipe section } - result = PlxFactory.dict_to_plx_styled_toml(data) + result = MthdsFactory.dict_to_mthds_styled_toml(data) # Empty sections should be skipped assert "concept" not in result assert "pipe" not in result assert "domain" in result - def test_dict_to_plx_styled_toml_with_pipe_section(self, mocker: MockerFixture, mock_plx_config: PlxConfig): - """Test dict to PLX styled TOML with pipe section.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + def test_dict_to_mthds_styled_toml_with_pipe_section(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): + """Test dict to MTHDS styled TOML with pipe section.""" + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) data = {"domain": "test", "pipe": {"test_pipe": {"type": "PipeLLM", "description": "Test pipe"}}} - result = PlxFactory.dict_to_plx_styled_toml(data) + result = 
MthdsFactory.dict_to_mthds_styled_toml(data) assert "domain" in result assert "[pipe.test_pipe]" in result assert "type" in result assert "description" in result - def test_dict_to_plx_styled_toml_with_concept_section(self, mocker: MockerFixture, mock_plx_config: PlxConfig): - """Test dict to PLX styled TOML with concept section.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + def test_dict_to_mthds_styled_toml_with_concept_section(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): + """Test dict to MTHDS styled TOML with concept section.""" + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) data = {"domain": "test", "concept": {"TestConcept": "A test concept"}} - result = PlxFactory.dict_to_plx_styled_toml(data) + result = MthdsFactory.dict_to_mthds_styled_toml(data) assert "domain" in result assert "[concept]" in result assert "TestConcept" in result - def test_pipe_compose_construct_serialization_format(self, mocker: MockerFixture, mock_plx_config: PlxConfig): - """Test PipeComposeBlueprint construct serializes to correct PLX format.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + def test_pipe_compose_construct_serialization_format(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): + """Test PipeComposeBlueprint construct serializes to correct MTHDS format.""" + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) blueprint = PipelexBundleBlueprint( domain="test_domain", @@ -488,22 +488,22 @@ def test_pipe_compose_construct_serialization_format(self, mocker: MockerFixture }, ) - plx_content = PlxFactory.make_plx_content(blueprint=blueprint) + mthds_content = MthdsFactory.make_mthds_content(blueprint=blueprint) # Should have nested table section, not inline - assert "[pipe.compose_test.construct]" in plx_content + assert "[pipe.compose_test.construct]" in mthds_content # Should use concise format { from = '...' 
} - assert "value = { from = 'data.field' }" in plx_content - assert "name = { from = 'info.name' }" in plx_content + assert "value = { from = 'data.field' }" in mthds_content + assert "name = { from = 'info.name' }" in mthds_content # Should NOT have internal field names - assert "construct_blueprint" not in plx_content - assert "fields" not in plx_content - assert "from_path" not in plx_content - assert "method" not in plx_content + assert "construct_blueprint" not in mthds_content + assert "fields" not in mthds_content + assert "from_path" not in mthds_content + assert "method" not in mthds_content - def test_pipe_compose_construct_fixed_and_template_serialization(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_pipe_compose_construct_fixed_and_template_serialization(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test PipeComposeBlueprint construct with FIXED and TEMPLATE methods serializes correctly.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) blueprint = PipelexBundleBlueprint( domain="test_domain", @@ -524,19 +524,19 @@ def test_pipe_compose_construct_fixed_and_template_serialization(self, mocker: M }, ) - plx_content = PlxFactory.make_plx_content(blueprint=blueprint) + mthds_content = MthdsFactory.make_mthds_content(blueprint=blueprint) # Should have nested table section - assert "[pipe.compose_mixed.construct]" in plx_content + assert "[pipe.compose_mixed.construct]" in mthds_content # Fixed values should appear directly - assert "fixed_string = 'hello world'" in plx_content - assert "fixed_number = 42" in plx_content + assert "fixed_string = 'hello world'" in mthds_content + assert "fixed_number = 42" in mthds_content # From var should use { from = '...' } - assert "from_var = { from = 'data.value' }" in plx_content + assert "from_var = { from = 'data.value' }" in mthds_content # Template should use { template = '...' } - assert "templated = { template = 'Hello {{ data.name }}!' }" in plx_content + assert "templated = { template = 'Hello {{ data.name }}!' 
}" in mthds_content # Should NOT have internal field names (as key names in construct) - assert "fixed_value" not in plx_content - assert "from_path" not in plx_content + assert "fixed_value" not in mthds_content + assert "from_path" not in mthds_content # Check that 'method' does not appear as a key in construct section - assert "method =" not in plx_content + assert "method =" not in mthds_content diff --git a/tests/unit/pipelex/language/test_mthds_schema.py b/tests/unit/pipelex/language/test_mthds_schema.py new file mode 100644 index 000000000..d6a0bcdb9 --- /dev/null +++ b/tests/unit/pipelex/language/test_mthds_schema.py @@ -0,0 +1,206 @@ +"""Tests for MTHDS JSON Schema generation.""" + +from __future__ import annotations + +from typing import Any, cast + +import pytest + +from pipelex.core.pipes.pipe_blueprint import PipeType +from pipelex.language.mthds_schema_generator import generate_mthds_schema + + +class TestMthdsSchemaGeneration: + """Tests for generate_mthds_schema() and its post-processing pipeline.""" + + @pytest.fixture(scope="class") + def schema(self) -> dict[str, Any]: + """Generate the schema once for all tests in this class.""" + return generate_mthds_schema() + + def test_schema_is_valid_draft4(self, schema: dict[str, Any]) -> None: + """Verify the schema uses Draft 4 conventions, not Draft 2020-12.""" + # Must have definitions, not $defs + assert "definitions" in schema, "Schema should use 'definitions' (Draft 4), not '$defs'" + assert "$defs" not in schema, "Schema should not contain '$defs' (Draft 2020-12)" + + # Check no const anywhere in the schema (should be converted to enum) + _assert_key_absent_recursive(schema, "const", "const should be converted to single-value enum") + + # Check no discriminator anywhere in the schema (not in Draft 4) + _assert_key_absent_recursive(schema, "discriminator", "discriminator is not part of Draft 4") + + # Must have $schema pointing to Draft 4 + assert schema.get("$schema") == "http://json-schema.org/draft-04/schema#" + + def test_exclusive_minimum_is_draft4_boolean(self, schema: dict[str, Any]) -> None: + """Verify exclusiveMinimum/exclusiveMaximum use Draft 4 boolean syntax, not Draft 6+ number syntax. 
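+        The generator is expected to rewrite the standalone numeric form into the
+        boolean flag plus its companion "minimum"/"maximum" bound, which is why each
+        exclusive bound below must be accompanied by that companion field.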
+ + Draft 4: "minimum": 0, "exclusiveMinimum": true + Draft 6+: "exclusiveMinimum": 0 (number, standalone) + """ + exclusive_nodes: list[tuple[str, dict[str, Any]]] = [] + _collect_exclusive_nodes(schema, "", exclusive_nodes) + + assert len(exclusive_nodes) > 0, "Schema should contain at least one exclusiveMinimum or exclusiveMaximum" + + for path, node in exclusive_nodes: + if "exclusiveMinimum" in node: + assert node["exclusiveMinimum"] is True, ( + f"exclusiveMinimum at {path} should be boolean true (Draft 4), got {node['exclusiveMinimum']!r}" + ) + assert "minimum" in node, f"exclusiveMinimum at {path} requires a companion 'minimum' field in Draft 4" + if "exclusiveMaximum" in node: + assert node["exclusiveMaximum"] is True, ( + f"exclusiveMaximum at {path} should be boolean true (Draft 4), got {node['exclusiveMaximum']!r}" + ) + assert "maximum" in node, f"exclusiveMaximum at {path} requires a companion 'maximum' field in Draft 4" + + def test_source_field_excluded(self, schema: dict[str, Any]) -> None: + """Verify that 'source' field is not present in any definition.""" + # Check root properties + root_props = schema.get("properties", {}) + assert "source" not in root_props, "source should be excluded from root properties" + + # Check all definitions + definitions = schema.get("definitions", {}) + for def_name, def_schema in definitions.items(): + props = def_schema.get("properties", {}) + assert "source" not in props, f"source should be excluded from {def_name}" + + def test_pipe_category_field_excluded(self, schema: dict[str, Any]) -> None: + """Verify that 'pipe_category' is not present in pipe definitions.""" + definitions = schema.get("definitions", {}) + pipe_def_names = [def_name for def_name in definitions if def_name.startswith("Pipe") and def_name.endswith("Blueprint")] + + assert len(pipe_def_names) > 0, "Should have pipe blueprint definitions" + + for def_name in pipe_def_names: + props = definitions[def_name].get("properties", {}) + assert "pipe_category" not in props, f"pipe_category should be excluded from {def_name}" + + def test_construct_alias_used(self, schema: dict[str, Any]) -> None: + """Verify PipeComposeBlueprint uses 'construct' alias, not 'construct_blueprint'.""" + definitions = schema.get("definitions", {}) + compose_def = definitions.get("PipeComposeBlueprint", {}) + props = compose_def.get("properties", {}) + + assert "construct" in props, "PipeComposeBlueprint should have 'construct' (alias), not 'construct_blueprint'" + assert "construct_blueprint" not in props, "Internal name 'construct_blueprint' should not appear in schema" + + def test_all_pipe_types_present(self, schema: dict[str, Any]) -> None: + """Verify all 9 pipe types are represented in the schema definitions.""" + definitions = schema.get("definitions", {}) + + expected_blueprint_names = { + "PipeFuncBlueprint", + "PipeImgGenBlueprint", + "PipeComposeBlueprint", + "PipeLLMBlueprint", + "PipeExtractBlueprint", + "PipeBatchBlueprint", + "PipeConditionBlueprint", + "PipeParallelBlueprint", + "PipeSequenceBlueprint", + } + + for blueprint_name in expected_blueprint_names: + assert blueprint_name in definitions, f"{blueprint_name} should be present in schema definitions" + + # Also verify we have 9 pipe types matching the PipeType enum + assert len(PipeType.value_list()) == 9, "Should have exactly 9 pipe types" + + def test_construct_schema_matches_mthds_format(self, schema: dict[str, Any]) -> None: + """Verify ConstructBlueprint uses additionalProperties, not 'fields' wrapper.""" + definitions 
= schema.get("definitions", {}) + construct_def = definitions.get("ConstructBlueprint", {}) + + # Should use additionalProperties (MTHDS format: fields at root) + assert "additionalProperties" in construct_def, "ConstructBlueprint should use additionalProperties for MTHDS-format fields" + + # Should not have a 'fields' property (internal model structure) + props = construct_def.get("properties", {}) + assert "fields" not in props, "ConstructBlueprint should not expose internal 'fields' wrapper" + + # Should require at least one field + assert construct_def.get("minProperties") == 1, "ConstructBlueprint should require at least one field" + + def test_taplo_metadata_present(self, schema: dict[str, Any]) -> None: + """Verify root schema has x-taplo.initKeys metadata.""" + assert "x-taplo" in schema, "Schema should have x-taplo metadata" + taplo_meta = schema["x-taplo"] + assert "initKeys" in taplo_meta, "x-taplo should have initKeys" + assert "domain" in taplo_meta["initKeys"], "initKeys should include 'domain'" + + def test_schema_has_title_and_comment(self, schema: dict[str, Any]) -> None: + """Verify the schema has proper title and version comment.""" + assert schema.get("title") == "MTHDS File Schema" + assert "$comment" in schema + assert "PipelexBundleBlueprint" in schema["$comment"] + + def test_ref_paths_use_definitions(self, schema: dict[str, Any]) -> None: + """Verify all $ref paths use #/definitions/ (Draft 4), not #/$defs/.""" + refs: list[str] = [] + _collect_refs_recursive(schema, refs) + + for ref_value in refs: + assert "#/$defs/" not in ref_value, f"$ref should use #/definitions/, got: {ref_value}" + + def test_construct_field_schema_has_all_methods(self, schema: dict[str, Any]) -> None: + """Verify the construct field schema covers all 4 composition methods.""" + definitions = schema.get("definitions", {}) + field_def = definitions.get("ConstructFieldBlueprint", {}) + + any_of = field_def.get("anyOf", []) + assert len(any_of) >= 4, "ConstructFieldBlueprint should have at least 4 anyOf variants" + + # Check we have the key formats: raw values, {from: ...}, {template: ...}, nested + descriptions = [item.get("description", "") for item in any_of] + has_from = any("from" in desc.lower() or "variable" in desc.lower() for desc in descriptions) + has_template = any("template" in desc.lower() for desc in descriptions) + has_nested = any("nested" in desc.lower() for desc in descriptions) + + assert has_from, "Should have a 'from' (variable reference) variant" + assert has_template, "Should have a 'template' variant" + assert has_nested, "Should have a 'nested construct' variant" + + +def _assert_key_absent_recursive(node: Any, key: str, message: str) -> None: + """Assert that a key is not present anywhere in a nested dict/list structure.""" + if isinstance(node, dict): + typed_node = cast("dict[str, Any]", node) + assert key not in typed_node, f"{message} (found in dict with keys: {list(typed_node.keys())[:5]})" + for child_value in typed_node.values(): + _assert_key_absent_recursive(child_value, key, message) + elif isinstance(node, list): + typed_list = cast("list[Any]", node) + for child_item in typed_list: + _assert_key_absent_recursive(child_item, key, message) + + +def _collect_refs_recursive(node: Any, refs: list[str]) -> None: + """Collect all $ref values from a nested dict/list structure.""" + if isinstance(node, dict): + typed_node = cast("dict[str, Any]", node) + if "$ref" in typed_node and isinstance(typed_node["$ref"], str): + refs.append(typed_node["$ref"]) + for 
child_value in typed_node.values(): + _collect_refs_recursive(child_value, refs) + elif isinstance(node, list): + typed_list = cast("list[Any]", node) + for child_item in typed_list: + _collect_refs_recursive(child_item, refs) + + +def _collect_exclusive_nodes(node: Any, path: str, results: list[tuple[str, dict[str, Any]]]) -> None: + """Collect all nodes that contain exclusiveMinimum or exclusiveMaximum.""" + if isinstance(node, dict): + typed_node = cast("dict[str, Any]", node) + if "exclusiveMinimum" in typed_node or "exclusiveMaximum" in typed_node: + results.append((path, typed_node)) + for key, child_value in typed_node.items(): + _collect_exclusive_nodes(child_value, f"{path}.{key}", results) + elif isinstance(node, list): + typed_list = cast("list[Any]", node) + for index, child_item in enumerate(typed_list): + _collect_exclusive_nodes(child_item, f"{path}[{index}]", results) diff --git a/tests/unit/pipelex/libraries/test_concept_validation_cross_package.py b/tests/unit/pipelex/libraries/test_concept_validation_cross_package.py new file mode 100644 index 000000000..411b6fd0c --- /dev/null +++ b/tests/unit/pipelex/libraries/test_concept_validation_cross_package.py @@ -0,0 +1,107 @@ +import pytest + +from pipelex.core.concepts.concept import Concept +from pipelex.libraries.concept.concept_library import ConceptLibrary +from pipelex.libraries.concept.exceptions import ConceptLibraryError +from pipelex.libraries.domain.domain_library import DomainLibrary +from pipelex.libraries.exceptions import LibraryError +from pipelex.libraries.library import Library +from pipelex.libraries.library_factory import LibraryFactory +from pipelex.libraries.pipe.pipe_library import PipeLibrary + + +def _make_stub_concept(code: str, domain_code: str, refines: str | None = None) -> Concept: + """Create a minimal Concept for testing.""" + return Concept( + code=code, + domain_code=domain_code, + description="Test concept", + structure_class_name="TextContent", + refines=refines, + ) + + +def _make_child_library() -> Library: + """Create a minimal child library (no native concepts needed).""" + return Library( + domain_library=DomainLibrary.make_empty(), + concept_library=ConceptLibrary.make_empty(), + pipe_library=PipeLibrary.make_empty(), + ) + + +class TestConceptValidationCrossPackageLibrary: + """Tests for cross-package concept validation at the library level.""" + + def test_validation_static_skips_cross_package_refines(self): + """validation_static should not raise for cross-package refines even though target is not in root.""" + concept = _make_stub_concept( + code="RefinedScore", + domain_code="my_domain", + refines="scoring_dep->scoring.WeightedScore", + ) + # This should NOT raise, because cross-package refines are skipped + library = ConceptLibrary(root={"my_domain.RefinedScore": concept}) + assert "my_domain.RefinedScore" in library.root + + def test_validation_static_still_catches_missing_local_refines(self): + """validation_static still raises for missing local refines targets.""" + concept = _make_stub_concept( + code="RefinedScore", + domain_code="my_domain", + refines="my_domain.MissingBase", + ) + with pytest.raises(ConceptLibraryError, match="no concept with the code"): + ConceptLibrary(root={"my_domain.RefinedScore": concept}) + + def test_validate_concept_library_catches_missing_cross_package_target(self): + """validate_concept_library_with_libraries raises when cross-package target is missing in loaded dep.""" + library = LibraryFactory.make_empty() + # Add child library that is empty 
(target concept not present) + child = _make_child_library() + library.dependency_libraries["scoring_dep"] = child + + # Add concept with cross-package refines to main library + concept = _make_stub_concept( + code="RefinedScore", + domain_code="my_domain", + refines="scoring_dep->scoring.WeightedScore", + ) + library.concept_library.add_new_concept(concept) + + with pytest.raises(LibraryError, match="was not found in dependency"): + library.validate_concept_library_with_libraries() + + def test_validate_concept_library_passes_with_loaded_dependency(self): + """validate_concept_library_with_libraries passes when target exists in child library.""" + library = LibraryFactory.make_empty() + child = _make_child_library() + target_concept = _make_stub_concept(code="WeightedScore", domain_code="scoring") + child.concept_library.add_new_concept(target_concept) + library.dependency_libraries["scoring_dep"] = child + + # Add concept with cross-package refines + concept = _make_stub_concept( + code="RefinedScore", + domain_code="my_domain", + refines="scoring_dep->scoring.WeightedScore", + ) + library.concept_library.add_new_concept(concept) + + # Should not raise + library.validate_concept_library_with_libraries() + + def test_validate_concept_library_skips_unloaded_dependency(self): + """validate_concept_library_with_libraries skips validation for unloaded dependencies.""" + library = LibraryFactory.make_empty() + # No child library registered for "unknown_dep" + + concept = _make_stub_concept( + code="RefinedScore", + domain_code="my_domain", + refines="unknown_dep->scoring.WeightedScore", + ) + library.concept_library.add_new_concept(concept) + + # Should not raise — skips validation for unloaded deps + library.validate_concept_library_with_libraries() diff --git a/tests/unit/pipelex/libraries/test_library_isolation.py b/tests/unit/pipelex/libraries/test_library_isolation.py new file mode 100644 index 000000000..7996dee56 --- /dev/null +++ b/tests/unit/pipelex/libraries/test_library_isolation.py @@ -0,0 +1,168 @@ +from pytest_mock import MockerFixture + +from pipelex.core.concepts.concept import Concept +from pipelex.libraries.concept.concept_library import ConceptLibrary +from pipelex.libraries.domain.domain_library import DomainLibrary +from pipelex.libraries.library import Library +from pipelex.libraries.library_factory import LibraryFactory +from pipelex.libraries.pipe.pipe_library import PipeLibrary + + +def _make_stub_concept(code: str, domain_code: str) -> Concept: + """Create a minimal Concept for testing.""" + return Concept( + code=code, + domain_code=domain_code, + description="Test concept", + structure_class_name="TextContent", + ) + + +def _make_child_library() -> Library: + """Create a minimal child library (no native concepts needed).""" + return Library( + domain_library=DomainLibrary.make_empty(), + concept_library=ConceptLibrary.make_empty(), + pipe_library=PipeLibrary.make_empty(), + ) + + +class TestLibraryIsolation: + """Tests for per-package library isolation via dependency_libraries.""" + + def test_dependency_library_created(self): + """dependency_libraries field exists and starts empty on a fresh Library.""" + library = LibraryFactory.make_empty() + assert library.dependency_libraries == {} + + def test_register_and_get_dependency_library(self): + """get_dependency_library() retrieves a registered child library.""" + library = LibraryFactory.make_empty() + child = _make_child_library() + library.dependency_libraries["scoring_dep"] = child + assert 
library.get_dependency_library("scoring_dep") is child + + def test_get_dependency_library_returns_none_for_missing(self): + """get_dependency_library() returns None for unknown alias.""" + library = LibraryFactory.make_empty() + assert library.get_dependency_library("unknown") is None + + def test_concept_isolation_no_native_key_in_main(self): + """Concepts in child library are NOT in main concept_library with native keys.""" + library = LibraryFactory.make_empty() + child = _make_child_library() + concept = _make_stub_concept(code="WeightedScore", domain_code="scoring") + child.concept_library.add_new_concept(concept) + library.dependency_libraries["scoring_dep"] = child + + # The concept should NOT be in the main library with its native key + assert not library.concept_library.is_concept_exists("scoring.WeightedScore") + + def test_cross_package_lookup_via_alias(self): + """Cross-package concept lookup via aliased key in main library works.""" + library = LibraryFactory.make_empty() + child = _make_child_library() + concept = _make_stub_concept(code="WeightedScore", domain_code="scoring") + child.concept_library.add_new_concept(concept) + library.dependency_libraries["scoring_dep"] = child + + # Add aliased entry to main library (as _load_single_dependency does) + library.concept_library.add_dependency_concept(alias="scoring_dep", concept=concept) + + result = library.concept_library.get_required_concept("scoring_dep->scoring.WeightedScore") + assert result.code == "WeightedScore" + + def test_resolve_concept_routes_through_child(self): + """resolve_concept() routes cross-package refs through child library.""" + library = LibraryFactory.make_empty() + child = _make_child_library() + concept = _make_stub_concept(code="WeightedScore", domain_code="scoring") + child.concept_library.add_new_concept(concept) + library.dependency_libraries["scoring_dep"] = child + + resolved = library.resolve_concept("scoring_dep->scoring.WeightedScore") + assert resolved is not None + assert resolved.code == "WeightedScore" + assert resolved.concept_ref == "scoring.WeightedScore" + + def test_resolve_concept_returns_none_for_missing_alias(self): + """resolve_concept() returns None when alias has no child library.""" + library = LibraryFactory.make_empty() + assert library.resolve_concept("unknown_dep->scoring.WeightedScore") is None + + def test_resolve_concept_returns_none_for_missing_concept_in_child(self): + """resolve_concept() returns None when concept not in child library.""" + library = LibraryFactory.make_empty() + child = _make_child_library() + library.dependency_libraries["scoring_dep"] = child + assert library.resolve_concept("scoring_dep->scoring.Missing") is None + + def test_resolve_concept_local_ref(self): + """resolve_concept() falls back to main library for local refs.""" + library = LibraryFactory.make_empty() + concept = _make_stub_concept(code="LocalConcept", domain_code="local") + library.concept_library.add_new_concept(concept) + + resolved = library.resolve_concept("local.LocalConcept") + assert resolved is not None + assert resolved.code == "LocalConcept" + + def test_teardown_cleans_children(self): + """teardown() clears dependency_libraries.""" + library = LibraryFactory.make_empty() + child = _make_child_library() + concept = _make_stub_concept(code="WeightedScore", domain_code="scoring") + child.concept_library.add_new_concept(concept) + library.dependency_libraries["scoring_dep"] = child + + library.teardown() + assert library.dependency_libraries == {} + + def 
test_concept_name_collision_two_deps(self):
+        """Two deps with the same concept code in different domains cause no conflict."""
+        library = LibraryFactory.make_empty()
+
+        # First dep: scoring_dep with PkgTestWeightedScore in the pkg_test_scoring_dep domain
+        child_scoring = _make_child_library()
+        scoring_concept = _make_stub_concept(code="PkgTestWeightedScore", domain_code="pkg_test_scoring_dep")
+        child_scoring.concept_library.add_new_concept(scoring_concept)
+        library.dependency_libraries["scoring_dep"] = child_scoring
+
+        # Second dep: analytics_dep with PkgTestWeightedScore in the pkg_test_analytics_dep domain
+        child_analytics = _make_child_library()
+        analytics_concept = _make_stub_concept(code="PkgTestWeightedScore", domain_code="pkg_test_analytics_dep")
+        child_analytics.concept_library.add_new_concept(analytics_concept)
+        library.dependency_libraries["analytics_dep"] = child_analytics
+
+        # Add aliased entries to main library
+        library.concept_library.add_dependency_concept(alias="scoring_dep", concept=scoring_concept)
+        library.concept_library.add_dependency_concept(alias="analytics_dep", concept=analytics_concept)
+
+        # Both resolve correctly through their own child libraries
+        resolved_scoring = library.resolve_concept("scoring_dep->pkg_test_scoring_dep.PkgTestWeightedScore")
+        resolved_analytics = library.resolve_concept("analytics_dep->pkg_test_analytics_dep.PkgTestWeightedScore")
+        assert resolved_scoring is not None
+        assert resolved_analytics is not None
+        assert resolved_scoring.domain_code == "pkg_test_scoring_dep"
+        assert resolved_analytics.domain_code == "pkg_test_analytics_dep"
+
+    def test_has_unresolved_cross_package_deps_with_child_library(self, mocker: MockerFixture):
+        """_has_unresolved_cross_package_deps returns False when alias has child library."""
+        library = LibraryFactory.make_empty()
+        child = _make_child_library()
+        library.dependency_libraries["scoring_dep"] = child
+
+        mock_pipe = mocker.MagicMock()
+        mock_pipe.pipe_dependencies.return_value = ["scoring_dep->pkg_test_scoring_dep.pkg_test_compute_score"]
+
+        # Even though the pipe isn't in the main pipe library, the alias has a child library
+        assert library._has_unresolved_cross_package_deps(mock_pipe) is False  # noqa: SLF001 # pyright: ignore[reportPrivateUsage]
+
+    def test_has_unresolved_cross_package_deps_without_child_library(self, mocker: MockerFixture):
+        """_has_unresolved_cross_package_deps returns True when alias has no child library."""
+        library = LibraryFactory.make_empty()
+
+        mock_pipe = mocker.MagicMock()
+        mock_pipe.pipe_dependencies.return_value = ["unknown_dep->domain.pipe"]
+
+        assert library._has_unresolved_cross_package_deps(mock_pipe) is True  # noqa: SLF001 # pyright: ignore[reportPrivateUsage]
diff --git a/tests/unit/pipelex/libraries/test_mthds_version_warning.py b/tests/unit/pipelex/libraries/test_mthds_version_warning.py
new file mode 100644
index 000000000..9ada0b254
--- /dev/null
+++ b/tests/unit/pipelex/libraries/test_mthds_version_warning.py
@@ -0,0 +1,93 @@
+from pathlib import Path
+
+from pytest_mock import MockerFixture
+
+from pipelex.core.packages.dependency_resolver import ResolvedDependency
+from pipelex.core.packages.manifest import MthdsPackageManifest
+from pipelex.libraries.library_factory import LibraryFactory
+from pipelex.libraries.library_manager import LibraryManager
+
+
+class TestMthdsVersionWarning:
+    """Tests for _warn_if_mthds_version_unsatisfied runtime warning."""
+
+    def test_warning_emitted_when_version_unsatisfied(self, mocker: MockerFixture) -> None:
+        """Warning emitted when
current MTHDS standard version does not satisfy the constraint.""" + mocker.patch("pipelex.libraries.library_manager.MTHDS_STANDARD_VERSION", "1.0.0") + mock_log = mocker.patch("pipelex.libraries.library_manager.log") + + manager = LibraryManager() + manager._warn_if_mthds_version_unsatisfied( # noqa: SLF001 # pyright: ignore[reportPrivateUsage] + mthds_version_constraint="^2.0.0", + package_address="github.com/org/pkg", + ) + + mock_log.warning.assert_called_once() + warning_msg = mock_log.warning.call_args[0][0] + assert "github.com/org/pkg" in warning_msg + assert "^2.0.0" in warning_msg + assert "1.0.0" in warning_msg + + def test_no_warning_when_version_satisfied(self, mocker: MockerFixture) -> None: + """No warning emitted when current MTHDS standard version satisfies the constraint.""" + mocker.patch("pipelex.libraries.library_manager.MTHDS_STANDARD_VERSION", "1.0.0") + mock_log = mocker.patch("pipelex.libraries.library_manager.log") + + manager = LibraryManager() + manager._warn_if_mthds_version_unsatisfied( # noqa: SLF001 # pyright: ignore[reportPrivateUsage] + mthds_version_constraint="^1.0.0", + package_address="github.com/org/pkg", + ) + + mock_log.warning.assert_not_called() + + def test_warning_on_unparseable_constraint(self, mocker: MockerFixture) -> None: + """Warning emitted when the constraint is not parseable by the semver engine.""" + mock_log = mocker.patch("pipelex.libraries.library_manager.log") + + manager = LibraryManager() + manager._warn_if_mthds_version_unsatisfied( # noqa: SLF001 # pyright: ignore[reportPrivateUsage] + mthds_version_constraint=">>>garbage", + package_address="github.com/org/pkg", + ) + + mock_log.warning.assert_called_once() + warning_msg = mock_log.warning.call_args[0][0] + assert "Could not parse" in warning_msg + + def test_warning_emitted_for_dependency_mthds_version(self, mocker: MockerFixture, tmp_path: Path) -> None: + """Warning emitted when a dependency manifest has unsatisfied mthds_version.""" + mocker.patch("pipelex.libraries.library_manager.MTHDS_STANDARD_VERSION", "1.0.0") + mock_log = mocker.patch("pipelex.libraries.library_manager.log") + + # Create a minimal .mthds file so the interpreter can parse it + mthds_file = tmp_path / "dep.mthds" + mthds_file.write_text('domain = "dep_domain"\n') + + dep_manifest = MthdsPackageManifest( + address="github.com/org/dep-pkg", + version="1.0.0", + description="A dependency", + mthds_version="^2.0.0", + ) + resolved_dep = ResolvedDependency( + alias="dep_alias", + address="github.com/org/dep-pkg", + manifest=dep_manifest, + package_root=tmp_path, + mthds_files=[mthds_file], + exported_pipe_codes=None, + ) + + manager = LibraryManager() + library = LibraryFactory.make_empty() + + manager._load_single_dependency( # noqa: SLF001 # pyright: ignore[reportPrivateUsage] + library=library, + resolved_dep=resolved_dep, + ) + + # Verify a version warning was emitted for the dependency address + warning_calls = [call_args[0][0] for call_args in mock_log.warning.call_args_list] + dep_version_warnings = [msg for msg in warning_calls if "github.com/org/dep-pkg" in msg and "^2.0.0" in msg] + assert len(dep_version_warnings) >= 1 diff --git a/tests/unit/pipelex/libraries/test_pipe_library_lookup.py b/tests/unit/pipelex/libraries/test_pipe_library_lookup.py new file mode 100644 index 000000000..9767e1379 --- /dev/null +++ b/tests/unit/pipelex/libraries/test_pipe_library_lookup.py @@ -0,0 +1,101 @@ +from typing import Any + +import pytest +from pytest_mock import MockerFixture + +from 
pipelex.libraries.pipe.exceptions import PipeNotFoundError +from pipelex.libraries.pipe.pipe_library import PipeLibrary + + +def _make_stub_pipe(mocker: MockerFixture, code: str, domain_code: str) -> Any: + """Create a minimal mock pipe with code and domain_code.""" + mock_pipe = mocker.MagicMock() + mock_pipe.code = code + mock_pipe.domain_code = domain_code + return mock_pipe + + +class TestPipeLibraryLookup: + """Tests for PipeLibrary.get_optional_pipe domain enforcement and malformed-ref safety.""" + + def test_bare_code_lookup(self, mocker: MockerFixture): + """Bare code lookup still works.""" + library = PipeLibrary.make_empty() + mock_pipe = _make_stub_pipe(mocker, code="compute_score", domain_code="scoring") + library.root["compute_score"] = mock_pipe + result = library.get_optional_pipe("compute_score") + assert result is mock_pipe + + def test_domain_qualified_ref_correct_domain(self, mocker: MockerFixture): + """Domain-qualified ref resolves when pipe domain matches.""" + library = PipeLibrary.make_empty() + mock_pipe = _make_stub_pipe(mocker, code="compute_score", domain_code="scoring") + library.root["compute_score"] = mock_pipe + result = library.get_optional_pipe("scoring.compute_score") + assert result is mock_pipe + + def test_domain_qualified_ref_wrong_domain(self, mocker: MockerFixture): + """Domain-qualified ref returns None when pipe domain does not match.""" + library = PipeLibrary.make_empty() + mock_pipe = _make_stub_pipe(mocker, code="compute_score", domain_code="scoring") + library.root["compute_score"] = mock_pipe + result = library.get_optional_pipe("wrong_domain.compute_score") + assert result is None + + def test_cross_package_ref_correct_domain(self, mocker: MockerFixture): + """Cross-package ref resolves when pipe domain matches.""" + library = PipeLibrary.make_empty() + mock_pipe = _make_stub_pipe(mocker, code="compute_score", domain_code="scoring") + library.add_dependency_pipe(alias="lib", pipe=mock_pipe) + result = library.get_optional_pipe("lib->scoring.compute_score") + assert result is mock_pipe + + def test_cross_package_ref_wrong_domain(self, mocker: MockerFixture): + """Cross-package ref returns None when pipe domain does not match.""" + library = PipeLibrary.make_empty() + mock_pipe = _make_stub_pipe(mocker, code="compute_score", domain_code="scoring") + library.add_dependency_pipe(alias="lib", pipe=mock_pipe) + result = library.get_optional_pipe("lib->wrong_domain.compute_score") + assert result is None + + @pytest.mark.parametrize( + "malformed_ref", + [ + "foo..bar", + ".foo", + "foo.", + ], + ) + def test_malformed_dotted_ref_returns_none(self, malformed_ref: str): + """Malformed dotted refs return None instead of raising.""" + library = PipeLibrary.make_empty() + result = library.get_optional_pipe(malformed_ref) + assert result is None + + @pytest.mark.parametrize( + "malformed_ref", + [ + "lib->foo..bar", + "lib->.foo", + "lib->foo.", + ], + ) + def test_malformed_cross_package_ref_returns_none(self, malformed_ref: str): + """Malformed cross-package refs return None instead of raising.""" + library = PipeLibrary.make_empty() + result = library.get_optional_pipe(malformed_ref) + assert result is None + + def test_get_required_pipe_malformed_raises_not_found(self): + """Malformed ref through get_required_pipe raises PipeNotFoundError, not QualifiedRefError.""" + library = PipeLibrary.make_empty() + with pytest.raises(PipeNotFoundError): + library.get_required_pipe("foo..bar") + + def 
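test_ref_shapes_side_by_side_sketch(self, mocker: MockerFixture): + """Hedged sketch: bare, domain-qualified, and cross-package refs hit the same pipe.""" + # Illustrative only, registering one pipe under both a bare code and an alias, + # then looking it up through each of the three ref shapes tested above. + library = PipeLibrary.make_empty() + mock_pipe = _make_stub_pipe(mocker, code="compute_score", domain_code="scoring") + library.root["compute_score"] = mock_pipe + library.add_dependency_pipe(alias="lib", pipe=mock_pipe) + assert library.get_optional_pipe("compute_score") is mock_pipe + assert library.get_optional_pipe("scoring.compute_score") is mock_pipe + assert library.get_optional_pipe("lib->scoring.compute_score") is mock_pipe + + def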
test_get_required_pipe_domain_mismatch_raises_not_found(self, mocker: MockerFixture): + """Domain mismatch through get_required_pipe raises PipeNotFoundError.""" + library = PipeLibrary.make_empty() + mock_pipe = _make_stub_pipe(mocker, code="compute_score", domain_code="scoring") + library.root["compute_score"] = mock_pipe + with pytest.raises(PipeNotFoundError): + library.get_required_pipe("wrong_domain.compute_score") diff --git a/tests/unit/pipelex/libraries/test_standalone_reserved_domains.py b/tests/unit/pipelex/libraries/test_standalone_reserved_domains.py new file mode 100644 index 000000000..92da46ca5 --- /dev/null +++ b/tests/unit/pipelex/libraries/test_standalone_reserved_domains.py @@ -0,0 +1,130 @@ +from pathlib import Path + +import pytest +from pytest_mock import MockerFixture + +from pipelex.core.bundles.pipelex_bundle_blueprint import PipelexBundleBlueprint +from pipelex.core.packages.exceptions import ManifestError +from pipelex.libraries.exceptions import LibraryLoadingError +from pipelex.libraries.library_manager import LibraryManager + + +class TestStandaloneReservedDomains: + """Tests that reserved domain enforcement applies to standalone bundles (no manifest).""" + + @pytest.mark.parametrize( + "reserved_domain", + [ + "native", + "mthds", + "pipelex", + ], + ) + def test_standalone_bundle_reserved_domain_raises( + self, + mocker: MockerFixture, + tmp_path: Path, + reserved_domain: str, + ) -> None: + """Loading a standalone bundle with a reserved domain should raise LibraryLoadingError.""" + # Patch find_package_manifest to return None (no manifest = standalone) + mocker.patch("pipelex.libraries.library_manager.find_package_manifest", return_value=None) + + blueprint = PipelexBundleBlueprint( + domain=reserved_domain, + source="test_standalone.mthds", + ) + + dummy_path = tmp_path / "test_standalone.mthds" + dummy_path.touch() + + manager = LibraryManager() + with pytest.raises(LibraryLoadingError, match="Reserved domain violations"): + manager._check_package_visibility( # noqa: SLF001 # pyright: ignore[reportPrivateUsage] + blueprints=[blueprint], + mthds_paths=[dummy_path], + ) + + def test_standalone_bundle_non_reserved_domain_passes( + self, + mocker: MockerFixture, + tmp_path: Path, + ) -> None: + """Loading a standalone bundle with a non-reserved domain should not raise.""" + mocker.patch("pipelex.libraries.library_manager.find_package_manifest", return_value=None) + + blueprint = PipelexBundleBlueprint( + domain="legal", + source="test_standalone.mthds", + ) + + dummy_path = tmp_path / "test_standalone.mthds" + dummy_path.touch() + + manager = LibraryManager() + result = manager._check_package_visibility( # noqa: SLF001 # pyright: ignore[reportPrivateUsage] + blueprints=[blueprint], + mthds_paths=[dummy_path], + ) + assert result is None + + @pytest.mark.parametrize( + "reserved_domain", + [ + "native", + "mthds", + "pipelex", + ], + ) + def test_manifest_error_still_checks_reserved_domains( + self, + mocker: MockerFixture, + tmp_path: Path, + reserved_domain: str, + ) -> None: + """ManifestError should not bypass reserved domain validation.""" + mocker.patch( + "pipelex.libraries.library_manager.find_package_manifest", + side_effect=ManifestError(message="corrupt METHODS.toml"), + ) + + blueprint = PipelexBundleBlueprint( + domain=reserved_domain, + source="test_bad_manifest.mthds", + ) + + dummy_path = tmp_path / "test_bad_manifest.mthds" + dummy_path.touch() + + manager = LibraryManager() + with pytest.raises(LibraryLoadingError, match="Reserved domain 
violations"): + manager._check_package_visibility( # noqa: SLF001 # pyright: ignore[reportPrivateUsage] + blueprints=[blueprint], + mthds_paths=[dummy_path], + ) + + def test_manifest_error_non_reserved_domain_passes( + self, + mocker: MockerFixture, + tmp_path: Path, + ) -> None: + """ManifestError with a non-reserved domain should return None without raising.""" + mocker.patch( + "pipelex.libraries.library_manager.find_package_manifest", + side_effect=ManifestError(message="corrupt METHODS.toml"), + ) + + blueprint = PipelexBundleBlueprint( + domain="legal", + source="test_bad_manifest.mthds", + ) + + dummy_path = tmp_path / "test_bad_manifest.mthds" + dummy_path.touch() + + manager = LibraryManager() + result = manager._check_package_visibility( # noqa: SLF001 # pyright: ignore[reportPrivateUsage] + blueprints=[blueprint], + mthds_paths=[dummy_path], + ) + assert result is None diff --git a/tests/unit/pipelex/pipe_controllers/parallel/data.py b/tests/unit/pipelex/pipe_controllers/parallel/data.py index bdbfda0f9..9d65b0263 100644 --- a/tests/unit/pipelex/pipe_controllers/parallel/data.py +++ b/tests/unit/pipelex/pipe_controllers/parallel/data.py @@ -14,7 +14,7 @@ class PipeParallelInputTestCases: description="Test case: valid_with_add_each_output", inputs={"data": "native.Text"}, output="native.Text", - parallels=[ + branches=[ SubPipeBlueprint(pipe="process_a", result="result_a"), SubPipeBlueprint(pipe="process_b", result="result_b"), ], @@ -28,7 +28,7 @@ class PipeParallelInputTestCases: description="Test case: valid_with_combined_output", inputs={"data": "native.Text"}, output="native.Text", - parallels=[ + branches=[ SubPipeBlueprint(pipe="analyze_1", result="analysis_1"), SubPipeBlueprint(pipe="analyze_2", result="analysis_2"), ], @@ -42,7 +42,7 @@ class PipeParallelInputTestCases: description="Test case: valid_with_both_output_options", inputs={"data": "native.Text"}, output="native.Text", - parallels=[ + branches=[ SubPipeBlueprint(pipe="compute_x", result="x"), SubPipeBlueprint(pipe="compute_y", result="y"), ], @@ -52,12 +52,12 @@ class PipeParallelInputTestCases: ) VALID_THREE_PARALLELS: ClassVar[tuple[str, PipeParallelBlueprint]] = ( - "valid_three_parallels", + "valid_three_branches", PipeParallelBlueprint( description="Test case: valid_three_parallels", inputs={"input_data": "native.Text"}, output="native.Text", - parallels=[ + branches=[ SubPipeBlueprint(pipe="branch_1", result="result_1"), SubPipeBlueprint(pipe="branch_2", result="result_2"), SubPipeBlueprint(pipe="branch_3", result="result_3"), @@ -72,7 +72,7 @@ class PipeParallelInputTestCases: description="Test case: valid_multiple_inputs", inputs={"text_data": "native.Text", "image_data": "native.Image"}, output="native.Text", - parallels=[ + branches=[ SubPipeBlueprint(pipe="process_text", result="text_result"), SubPipeBlueprint(pipe="process_image", result="image_result"), ], @@ -96,7 +96,7 @@ class PipeParallelInputTestCases: "description": "Test case: no_output_options", "inputs": {"data": "native.Text"}, "output": "native.Text", - "parallels": [ + "branches": [ {"pipe": "process_a", "result": "result_a"}, {"pipe": "process_b", "result": "result_b"}, ], diff --git a/tests/unit/pipelex/pipe_controllers/parallel/test_pipe_parallel_blueprint.py b/tests/unit/pipelex/pipe_controllers/parallel/test_pipe_parallel_blueprint.py index 3574cffac..24373dfcb 100644 --- a/tests/unit/pipelex/pipe_controllers/parallel/test_pipe_parallel_blueprint.py +++ b/tests/unit/pipelex/pipe_controllers/parallel/test_pipe_parallel_blueprint.py 
@@ -11,7 +11,7 @@ def test_pipe_dependencies_correct(self): description="lorem ipsum", inputs={"data": "Text"}, output="Text", - parallels=[ + branches=[ SubPipeBlueprint(pipe="process_a", result="result_a"), SubPipeBlueprint(pipe="process_b", result="result_b"), ], @@ -23,7 +23,7 @@ def test_pipe_dependencies_correct(self): description="lorem ipsum", inputs={"data": "Text"}, output="Text", - parallels=[ + branches=[ SubPipeBlueprint(pipe="step1", result="result1"), SubPipeBlueprint(pipe="step2", result="result2"), SubPipeBlueprint(pipe="step3", result="result3"), @@ -37,7 +37,7 @@ def test_validate_combined_output_correct(self): description="lorem ipsum", inputs={"data": "Text"}, output="Text", - parallels=[SubPipeBlueprint(pipe="process", result="result")], + branches=[SubPipeBlueprint(pipe="process", result="result")], combined_output="Text", ) assert blueprint.combined_output == "Text" @@ -46,7 +46,7 @@ def test_validate_combined_output_correct(self): description="lorem ipsum", inputs={"data": "Text"}, output="Text", - parallels=[SubPipeBlueprint(pipe="process", result="result")], + branches=[SubPipeBlueprint(pipe="process", result="result")], combined_output="Number", ) assert blueprint.combined_output == "Number" @@ -57,7 +57,7 @@ def test_validate_combined_output_incorrect(self): description="lorem ipsum", inputs={"data": "Text"}, output="Text", - parallels=[SubPipeBlueprint(pipe="process", result="result")], + branches=[SubPipeBlueprint(pipe="process", result="result")], combined_output="InvalidConcept!", ) assert "Combined output 'InvalidConcept!' is not a valid concept string or code" in str(exc_info.value) @@ -67,7 +67,7 @@ def test_validate_output_options_correct(self): description="lorem ipsum", inputs={"data": "Text"}, output="Text", - parallels=[SubPipeBlueprint(pipe="process", result="result")], + branches=[SubPipeBlueprint(pipe="process", result="result")], add_each_output=True, ) assert blueprint.add_each_output is True @@ -76,7 +76,7 @@ def test_validate_output_options_correct(self): description="lorem ipsum", inputs={"data": "Text"}, output="Text", - parallels=[SubPipeBlueprint(pipe="process", result="result")], + branches=[SubPipeBlueprint(pipe="process", result="result")], combined_output="Text", ) assert blueprint.combined_output == "Text" @@ -85,7 +85,7 @@ def test_validate_output_options_correct(self): description="lorem ipsum", inputs={"data": "Text"}, output="Text", - parallels=[SubPipeBlueprint(pipe="process", result="result")], + branches=[SubPipeBlueprint(pipe="process", result="result")], add_each_output=True, combined_output="Text", ) @@ -98,7 +98,7 @@ def test_validate_output_options_incorrect(self): description="lorem ipsum", inputs={"data": "Text"}, output="Text", - parallels=[SubPipeBlueprint(pipe="process", result="result")], + branches=[SubPipeBlueprint(pipe="process", result="result")], add_each_output=False, combined_output=None, ) diff --git a/tests/unit/pipelex/pipe_operators/pipe_compose/test_construct_blueprint.py b/tests/unit/pipelex/pipe_operators/pipe_compose/test_construct_blueprint.py index e61cf87fd..239897614 100644 --- a/tests/unit/pipelex/pipe_operators/pipe_compose/test_construct_blueprint.py +++ b/tests/unit/pipelex/pipe_operators/pipe_compose/test_construct_blueprint.py @@ -1,6 +1,6 @@ """Unit tests for ConstructBlueprint - the container for field blueprints. -ConstructBlueprint is parsed from the `[pipe.name.construct]` section in PLX files. +ConstructBlueprint is parsed from the `[pipe.name.construct]` section in MTHDS files. 
""" from typing import Any, ClassVar diff --git a/tests/unit/pipelex/pipe_run/test_dry_run.py b/tests/unit/pipelex/pipe_run/test_dry_run.py new file mode 100644 index 000000000..6c31e9355 --- /dev/null +++ b/tests/unit/pipelex/pipe_run/test_dry_run.py @@ -0,0 +1,67 @@ +import pytest +from pytest_mock import MockerFixture + +from pipelex.libraries.pipe.exceptions import PipeNotFoundError +from pipelex.pipe_run.dry_run import DryRunStatus, dry_run_pipe, dry_run_pipes + + +class TestDryRun: + """Tests for dry_run_pipe and dry_run_pipes status reporting.""" + + @pytest.mark.asyncio + async def test_dry_run_pipe_with_unresolved_dependency_returns_skipped(self, mocker: MockerFixture) -> None: + """A pipe that raises PipeNotFoundError should be reported as SKIPPED, not SUCCESS.""" + mock_pipe = mocker.MagicMock() + mock_pipe.code = "test_pipe" + mock_pipe.needed_inputs.side_effect = PipeNotFoundError("dep->some_domain.some_pipe not found") + + result = await dry_run_pipe(mock_pipe) + + assert result.status == DryRunStatus.SKIPPED + assert result.error_message is not None + assert "unresolved dependency" in result.error_message + + @pytest.mark.asyncio + async def test_dry_run_pipe_with_unresolved_dependency_is_not_success(self, mocker: MockerFixture) -> None: + """Ensure skipped pipes are NOT counted as successful.""" + mock_pipe = mocker.MagicMock() + mock_pipe.code = "test_pipe" + mock_pipe.needed_inputs.side_effect = PipeNotFoundError("dep->some_domain.some_pipe not found") + + result = await dry_run_pipe(mock_pipe) + + assert result.status != DryRunStatus.SUCCESS + assert not result.status.is_success + + @pytest.mark.asyncio + async def test_dry_run_pipes_counts_skipped_separately(self, mocker: MockerFixture) -> None: + """Skipped pipes must not inflate the success count in dry_run_pipes.""" + mock_successful_pipe = mocker.MagicMock() + mock_successful_pipe.code = "successful_pipe" + mock_successful_pipe.needed_inputs.return_value = mocker.MagicMock(named_stuff_specs=[]) + mock_successful_pipe.validate_with_libraries.return_value = None + mock_successful_pipe.run_pipe = mocker.AsyncMock(return_value=None) + + mock_skipped_pipe = mocker.MagicMock() + mock_skipped_pipe.code = "skipped_pipe" + mock_skipped_pipe.needed_inputs.side_effect = PipeNotFoundError("dep->domain.pipe not found") + + results = await dry_run_pipes( + pipes=[mock_successful_pipe, mock_skipped_pipe], + raise_on_failure=False, + ) + + assert results["successful_pipe"].status == DryRunStatus.SUCCESS + assert results["skipped_pipe"].status == DryRunStatus.SKIPPED + + @pytest.mark.asyncio + async def test_dry_run_pipe_skipped_is_not_failure(self, mocker: MockerFixture) -> None: + """A skipped pipe should not be treated as a failure either.""" + mock_pipe = mocker.MagicMock() + mock_pipe.code = "test_pipe" + mock_pipe.needed_inputs.side_effect = PipeNotFoundError("missing dep") + + result = await dry_run_pipe(mock_pipe) + + assert result.status == DryRunStatus.SKIPPED + assert not result.status.is_failure diff --git a/tests/unit/pipelex/system/test_config_resolution.py b/tests/unit/pipelex/system/test_config_resolution.py new file mode 100644 index 000000000..38c89e830 --- /dev/null +++ b/tests/unit/pipelex/system/test_config_resolution.py @@ -0,0 +1,296 @@ +"""Tests for hierarchical config resolution: package defaults -> global -> project.""" + +from __future__ import annotations + +from pathlib import Path +from typing import TYPE_CHECKING + +import pytest + +if TYPE_CHECKING: + from pytest_mock import MockerFixture + +from 
pipelex.system.configuration.config_loader import ConfigLoader + + +class TestConfigResolution: + """Test the hierarchical config resolution in ConfigLoader.""" + + def test_find_project_root_with_git(self, tmp_path: Path) -> None: + """Walking up from a deep subdirectory finds the .git marker.""" + project_dir = tmp_path / "project" + (project_dir / ".git").mkdir(parents=True) + deep_dir = project_dir / "sub" / "deep" + deep_dir.mkdir(parents=True) + + result = ConfigLoader.find_project_root(deep_dir) + + assert result == project_dir.resolve() + + def test_find_project_root_with_pyproject_toml(self, tmp_path: Path) -> None: + """Walking up from a subdirectory finds pyproject.toml marker.""" + project_dir = tmp_path / "project" + project_dir.mkdir(parents=True) + (project_dir / "pyproject.toml").write_text("[project]\nname = 'test'") + sub_dir = project_dir / "src" / "app" + sub_dir.mkdir(parents=True) + + result = ConfigLoader.find_project_root(sub_dir) + + assert result == project_dir.resolve() + + def test_find_project_root_with_setup_py(self, tmp_path: Path) -> None: + """Walking up finds setup.py marker.""" + project_dir = tmp_path / "project" + project_dir.mkdir(parents=True) + (project_dir / "setup.py").write_text("") + sub_dir = project_dir / "src" + sub_dir.mkdir(parents=True) + + result = ConfigLoader.find_project_root(sub_dir) + + assert result == project_dir.resolve() + + def test_find_project_root_with_package_json(self, tmp_path: Path) -> None: + """Walking up finds package.json marker.""" + project_dir = tmp_path / "project" + project_dir.mkdir(parents=True) + (project_dir / "package.json").write_text("{}") + sub_dir = project_dir / "src" / "components" + sub_dir.mkdir(parents=True) + + result = ConfigLoader.find_project_root(sub_dir) + + assert result == project_dir.resolve() + + def test_find_project_root_no_markers(self, tmp_path: Path) -> None: + """A tree with no markers never resolves to a directory under tmp_path.""" + deep_dir = tmp_path / "no_project" / "a" / "b" / "c" + deep_dir.mkdir(parents=True) + + result = ConfigLoader.find_project_root(deep_dir) + + # The walk can escape tmp_path and hit real markers above it (e.g. the test + # runner's own repository), so a plain `result is None` assertion would be flaky. + # Assert instead that no directory under tmp_path was picked as the project root.
+ assert result is None or result.resolve() not in { + deep_dir.resolve(), + (tmp_path / "no_project" / "a" / "b").resolve(), + (tmp_path / "no_project" / "a").resolve(), + (tmp_path / "no_project").resolve(), + } + + def test_find_project_root_stops_at_nearest_marker(self, tmp_path: Path) -> None: + """When multiple markers exist in the tree, the nearest one wins.""" + outer = tmp_path / "outer" + (outer / ".git").mkdir(parents=True) + inner = outer / "inner" + inner.mkdir(parents=True) + (inner / "pyproject.toml").write_text("[project]\nname = 'inner'") + deep_dir = inner / "src" + deep_dir.mkdir(parents=True) + + result = ConfigLoader.find_project_root(deep_dir) + + assert result == inner.resolve() + + def test_global_config_dir_is_home(self, mocker: MockerFixture) -> None: + """global_config_dir always points to ~/.pipelex.""" + fake_home = Path("/fake/home") + mocker.patch.object(Path, "home", return_value=fake_home) + + loader = ConfigLoader() + + assert loader.global_config_dir == str(fake_home / ".pipelex") + + def test_project_config_dir_when_exists(self, tmp_path: Path, mocker: MockerFixture) -> None: + """project_config_dir returns the path when .pipelex exists at the project root.""" + project_dir = tmp_path / "project" + (project_dir / ".git").mkdir(parents=True) + (project_dir / ".pipelex").mkdir(parents=True) + + mocker.patch.object(Path, "cwd", return_value=project_dir) + + loader = ConfigLoader() + + assert loader.project_config_dir == str((project_dir / ".pipelex").resolve()) + + def test_project_config_dir_none_when_no_pipelex_dir(self, tmp_path: Path, mocker: MockerFixture) -> None: + """project_config_dir returns None when project root has no .pipelex directory.""" + project_dir = tmp_path / "project" + (project_dir / ".git").mkdir(parents=True) + + mocker.patch.object(Path, "cwd", return_value=project_dir) + + loader = ConfigLoader() + + assert loader.project_config_dir is None + + def test_effective_config_dir_is_project_when_exists(self, tmp_path: Path, mocker: MockerFixture) -> None: + """pipelex_config_dir returns project path when project .pipelex exists.""" + project_dir = tmp_path / "project" + (project_dir / ".git").mkdir(parents=True) + (project_dir / ".pipelex").mkdir(parents=True) + + mocker.patch.object(Path, "cwd", return_value=project_dir) + mocker.patch.object(Path, "home", return_value=tmp_path / "home") + + loader = ConfigLoader() + + assert loader.pipelex_config_dir == str((project_dir / ".pipelex").resolve()) + + def test_effective_config_dir_falls_back_to_global(self, tmp_path: Path, mocker: MockerFixture) -> None: + """pipelex_config_dir returns global path when no project .pipelex exists.""" + project_dir = tmp_path / "project" + (project_dir / ".git").mkdir(parents=True) + # No .pipelex dir in project + + fake_home = tmp_path / "home" + fake_home.mkdir(parents=True) + + mocker.patch.object(Path, "cwd", return_value=project_dir) + mocker.patch.object(Path, "home", return_value=fake_home) + + loader = ConfigLoader() + + assert loader.pipelex_config_dir == str(fake_home / ".pipelex") + + def test_project_root_returns_str_when_found(self, tmp_path: Path, mocker: MockerFixture) -> None: + """project_root returns the project root as a string.""" + project_dir = tmp_path / "project" + (project_dir / ".git").mkdir(parents=True) + sub_dir = project_dir / "sub" + sub_dir.mkdir(parents=True) + + mocker.patch.object(Path, "cwd", return_value=sub_dir) + + loader = ConfigLoader() + + assert loader.project_root == str(project_dir.resolve()) + + def 
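test_project_config_shadows_global_sketch(self, tmp_path: Path, mocker: MockerFixture) -> None: + """Hedged sketch: when both a project and a global .pipelex exist, the project one wins.""" + # Illustrative only, combining the fixtures used above to make the precedence explicit. + project_dir = tmp_path / "project" + (project_dir / ".git").mkdir(parents=True) + (project_dir / ".pipelex").mkdir() + global_home = tmp_path / "home" + (global_home / ".pipelex").mkdir(parents=True) + mocker.patch.object(Path, "cwd", return_value=project_dir) + mocker.patch.object(Path, "home", return_value=global_home) + loader = ConfigLoader() + assert loader.pipelex_config_dir == str((project_dir / ".pipelex").resolve()) + + def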
test_project_root_returns_none_without_markers(self, mocker: MockerFixture) -> None: + """project_root returns None when no project root markers are found. + + We mock find_project_root to avoid walking real filesystem. + """ + mocker.patch.object(ConfigLoader, "find_project_root", return_value=None) + + loader = ConfigLoader() + + assert loader.project_root is None + + def test_inference_files_from_project(self, tmp_path: Path, mocker: MockerFixture) -> None: + """Inference file paths resolve to project dir when it has the files.""" + project_dir = tmp_path / "project" + (project_dir / ".git").mkdir(parents=True) + pipelex_dir = project_dir / ".pipelex" + inference_dir = pipelex_dir / "inference" + inference_dir.mkdir(parents=True) + (inference_dir / "backends.toml").write_text("[backends]") + backends_dir = inference_dir / "backends" + backends_dir.mkdir() + (inference_dir / "routing_profiles.toml").write_text("[routing]") + deck_dir = inference_dir / "deck" + deck_dir.mkdir() + + mocker.patch.object(Path, "cwd", return_value=project_dir) + mocker.patch.object(Path, "home", return_value=tmp_path / "home") + + loader = ConfigLoader() + + assert loader.backends_file_path == str(inference_dir / "backends.toml") + assert loader.backends_dir_path == str(backends_dir) + assert loader.routing_profiles_file_path == str(inference_dir / "routing_profiles.toml") + assert loader.model_decks_dir_path == str(deck_dir) + + def test_inference_files_fallback_to_global(self, tmp_path: Path, mocker: MockerFixture) -> None: + """Inference file paths fall back to global dir when project dir has no files.""" + project_dir = tmp_path / "project" + (project_dir / ".git").mkdir(parents=True) + # Project .pipelex exists but has no inference files + (project_dir / ".pipelex").mkdir(parents=True) + + global_home = tmp_path / "home" + global_config = global_home / ".pipelex" / "inference" + global_config.mkdir(parents=True) + (global_config / "backends.toml").write_text("[backends]") + + mocker.patch.object(Path, "cwd", return_value=project_dir) + mocker.patch.object(Path, "home", return_value=global_home) + + loader = ConfigLoader() + + # backends.toml exists in global, so it should resolve there + assert loader.backends_file_path == str(global_config / "backends.toml") + + def test_ensure_global_config_created_on_first_run(self, tmp_path: Path, mocker: MockerFixture) -> None: + """ensure_global_config_exists creates ~/.pipelex/ with template files when it doesn't exist.""" + fake_home = tmp_path / "home" + fake_home.mkdir(parents=True) + + mocker.patch.object(Path, "home", return_value=fake_home) + + loader = ConfigLoader() + + # Verify global dir does not exist yet + global_dir = fake_home / ".pipelex" + assert not global_dir.exists() + + loader.ensure_global_config_exists() + + # Verify global dir was created with content + assert global_dir.is_dir() + assert (global_dir / "pipelex.toml").exists() + assert (global_dir / "inference").is_dir() + + def test_global_config_not_recreated_if_exists(self, tmp_path: Path, mocker: MockerFixture) -> None: + """ensure_global_config_exists does not overwrite existing ~/.pipelex/.""" + fake_home = tmp_path / "home" + global_dir = fake_home / ".pipelex" + global_dir.mkdir(parents=True) + marker_file = global_dir / "custom_marker.txt" + marker_file.write_text("do not delete") + + mocker.patch.object(Path, "home", return_value=fake_home) + + loader = ConfigLoader() + loader.ensure_global_config_exists() + + # Custom file should still be there + assert marker_file.exists() + 
assert marker_file.read_text() == "do not delete" + + @pytest.mark.parametrize( + "marker", + [".git", "pyproject.toml", "setup.py", "setup.cfg", "package.json", ".hg"], + ) + def test_find_project_root_all_markers(self, tmp_path: Path, marker: str) -> None: + """All supported markers are recognized.""" + project_dir = tmp_path / "project" + project_dir.mkdir(parents=True) + marker_path = project_dir / marker + if marker in {".git", ".hg"}: + marker_path.mkdir() + else: + marker_path.write_text("") + sub_dir = project_dir / "src" + sub_dir.mkdir(parents=True) + + result = ConfigLoader.find_project_root(sub_dir) + + assert result == project_dir.resolve() + + def test_cross_platform_paths(self, tmp_path: Path, mocker: MockerFixture) -> None: + """Config paths use Path throughout, no hardcoded separators.""" + fake_home = tmp_path / "home" + fake_home.mkdir(parents=True) + mocker.patch.object(Path, "home", return_value=fake_home) + + loader = ConfigLoader() + global_dir = loader.global_config_dir + + # Verify the path is valid and uses the correct separator for the platform + assert Path(global_dir).name == ".pipelex" + assert Path(global_dir).parent == fake_home diff --git a/tests/unit/pipelex/system/test_pipelex_check_initialization.py b/tests/unit/pipelex/system/test_pipelex_check_initialization.py index 81f60501d..17d0973c5 100644 --- a/tests/unit/pipelex/system/test_pipelex_check_initialization.py +++ b/tests/unit/pipelex/system/test_pipelex_check_initialization.py @@ -8,12 +8,18 @@ from pytest_mock import MockerFixture from pipelex.system.configuration.config_check import check_is_initialized -from pipelex.system.configuration.configs import ConfigPaths class TestPipelexCheckInitialization: """Test the check_is_initialized function from config_check module.""" + def _mock_config_manager_paths(self, mocker: MockerFixture, backends_file: str, routing_file: str) -> None: + """Mock config_manager properties used by config_check.""" + mock_manager = mocker.MagicMock() + mock_manager.backends_file_path = backends_file + mock_manager.routing_profiles_file_path = routing_file + mocker.patch("pipelex.system.configuration.config_check.config_manager", mock_manager) + def test_check_is_initialized_returns_true_when_all_files_exist(self, tmp_path: Path, mocker: MockerFixture) -> None: """Test that check_is_initialized returns True when all required files exist.""" # Setup test directories @@ -24,9 +30,8 @@ def test_check_is_initialized_returns_true_when_all_files_exist(self, tmp_path: backends_file.write_text("[backends]\nconfig = 'value'") routing_file.write_text("[routing]\nconfig = 'value'") - # Mock ConfigPaths to point to temp directory - mocker.patch.object(ConfigPaths, "BACKENDS_FILE_PATH", str(backends_file)) - mocker.patch.object(ConfigPaths, "ROUTING_PROFILES_FILE_PATH", str(routing_file)) + # Mock config_manager to point to temp directory + self._mock_config_manager_paths(mocker, str(backends_file), str(routing_file)) # Test result = check_is_initialized() @@ -43,9 +48,8 @@ def test_check_is_initialized_returns_false_when_backends_missing(self, tmp_path routing_file = config_dir / "routing_profiles.toml" routing_file.write_text("[routing]\nconfig = 'value'") - # Mock ConfigPaths to point to temp directory - mocker.patch.object(ConfigPaths, "BACKENDS_FILE_PATH", str(backends_file)) - mocker.patch.object(ConfigPaths, "ROUTING_PROFILES_FILE_PATH", str(routing_file)) + # Mock config_manager to point to temp directory + self._mock_config_manager_paths(mocker, str(backends_file), 
str(routing_file)) # Test result = check_is_initialized(print_warning_if_not=False) @@ -62,9 +66,8 @@ def test_check_is_initialized_returns_false_when_routing_missing(self, tmp_path: routing_file = config_dir / "routing_profiles.toml" backends_file.write_text("[backends]\nconfig = 'value'") - # Mock ConfigPaths to point to temp directory - mocker.patch.object(ConfigPaths, "BACKENDS_FILE_PATH", str(backends_file)) - mocker.patch.object(ConfigPaths, "ROUTING_PROFILES_FILE_PATH", str(routing_file)) + # Mock config_manager to point to temp directory + self._mock_config_manager_paths(mocker, str(backends_file), str(routing_file)) # Test result = check_is_initialized(print_warning_if_not=False) @@ -80,9 +83,8 @@ def test_check_is_initialized_returns_false_when_all_files_missing(self, tmp_pat backends_file = config_dir / "backends.toml" routing_file = config_dir / "routing_profiles.toml" - # Mock ConfigPaths to point to temp directory - mocker.patch.object(ConfigPaths, "BACKENDS_FILE_PATH", str(backends_file)) - mocker.patch.object(ConfigPaths, "ROUTING_PROFILES_FILE_PATH", str(routing_file)) + # Mock config_manager to point to temp directory + self._mock_config_manager_paths(mocker, str(backends_file), str(routing_file)) # Test result = check_is_initialized(print_warning_if_not=False) @@ -98,9 +100,8 @@ def test_check_is_initialized_prints_warning_when_not_initialized(self, tmp_path backends_file = config_dir / "backends.toml" routing_file = config_dir / "routing_profiles.toml" - # Mock ConfigPaths to point to temp directory - mocker.patch.object(ConfigPaths, "BACKENDS_FILE_PATH", str(backends_file)) - mocker.patch.object(ConfigPaths, "ROUTING_PROFILES_FILE_PATH", str(routing_file)) + # Mock config_manager to point to temp directory + self._mock_config_manager_paths(mocker, str(backends_file), str(routing_file)) # Mock console.print to suppress output during test mock_console = mocker.MagicMock() @@ -123,9 +124,8 @@ def test_check_is_initialized_returns_true_when_initialized_with_print_warning(s backends_file.write_text("[backends]\nconfig = 'value'") routing_file.write_text("[routing]\nconfig = 'value'") - # Mock ConfigPaths to point to temp directory - mocker.patch.object(ConfigPaths, "BACKENDS_FILE_PATH", str(backends_file)) - mocker.patch.object(ConfigPaths, "ROUTING_PROFILES_FILE_PATH", str(routing_file)) + # Mock config_manager to point to temp directory + self._mock_config_manager_paths(mocker, str(backends_file), str(routing_file)) # Test result = check_is_initialized(print_warning_if_not=True) @@ -142,9 +142,8 @@ def test_check_is_initialized_returns_false_with_only_backends_missing(self, tmp routing_file = config_dir / "routing_profiles.toml" routing_file.write_text("[routing]\nconfig = 'value'") - # Mock ConfigPaths to point to temp directory - mocker.patch.object(ConfigPaths, "BACKENDS_FILE_PATH", str(backends_file)) - mocker.patch.object(ConfigPaths, "ROUTING_PROFILES_FILE_PATH", str(routing_file)) + # Mock config_manager to point to temp directory + self._mock_config_manager_paths(mocker, str(backends_file), str(routing_file)) # Mock console.print to suppress output during test mocker.patch("pipelex.system.configuration.config_check.get_console", return_value=mocker.MagicMock()) @@ -164,9 +163,8 @@ def test_check_is_initialized_returns_false_with_only_routing_missing(self, tmp_ routing_file = config_dir / "routing_profiles.toml" backends_file.write_text("[backends]\nconfig = 'value'") - # Mock ConfigPaths to point to temp directory - mocker.patch.object(ConfigPaths, 
"BACKENDS_FILE_PATH", str(backends_file)) - mocker.patch.object(ConfigPaths, "ROUTING_PROFILES_FILE_PATH", str(routing_file)) + # Mock config_manager to point to temp directory + self._mock_config_manager_paths(mocker, str(backends_file), str(routing_file)) # Mock console.print to suppress output during test mocker.patch("pipelex.system.configuration.config_check.get_console", return_value=mocker.MagicMock()) diff --git a/tests/unit/pipelex/tools/misc/test_semver.py b/tests/unit/pipelex/tools/misc/test_semver.py new file mode 100644 index 000000000..f41901788 --- /dev/null +++ b/tests/unit/pipelex/tools/misc/test_semver.py @@ -0,0 +1,225 @@ +# pyright: reportUnknownMemberType=false, reportUnknownVariableType=false, reportUnknownArgumentType=false +import pytest +from semantic_version import SimpleSpec, Version # type: ignore[import-untyped] + +from pipelex.tools.misc.semver import ( + SemVerError, + parse_constraint, + parse_version, + parse_version_tag, + select_minimum_version, + select_minimum_version_for_multiple_constraints, + version_satisfies, +) + + +class TestSemver: + """Tests for the semver constraint evaluation engine.""" + + @pytest.mark.parametrize( + "version_str", + [ + "1.0.0", + "0.1.0", + "1.2.3-alpha", + "1.2.3-alpha.1", + "1.2.3+build", + "1.2.3-beta.1+build.123", + ], + ) + def test_parse_version_valid(self, version_str: str) -> None: + """Valid semver strings parse without error.""" + result = parse_version(version_str) + assert isinstance(result, Version) + + @pytest.mark.parametrize( + "version_str", + [ + "abc", + "", + "1.0.0.0", + ], + ) + def test_parse_version_invalid(self, version_str: str) -> None: + """Invalid semver strings raise SemVerError.""" + with pytest.raises(SemVerError): + parse_version(version_str) + + def test_parse_version_strips_v_prefix(self) -> None: + """A leading 'v' prefix is stripped before parsing.""" + result = parse_version("v1.2.3") + assert result == Version("1.2.3") + + @pytest.mark.parametrize( + ("lower", "higher"), + [ + ("1.0.0", "2.0.0"), + ("1.0.0", "1.1.0"), + ("1.0.0", "1.0.1"), + ("1.0.0-alpha", "1.0.0"), + ("1.0.0-alpha", "1.0.0-beta"), + ], + ) + def test_version_comparison_ordering(self, lower: str, higher: str) -> None: + """Versions compare in the correct semver order.""" + assert parse_version(lower) < parse_version(higher) + + @pytest.mark.parametrize( + ("constraint_str", "version_str", "expected"), + [ + ("^1.2.3", "1.2.3", True), + ("^1.2.3", "1.9.9", True), + ("^1.2.3", "2.0.0", False), + ("^1.2.3", "1.2.2", False), + ("^0.2.3", "0.2.3", True), + ("^0.2.3", "0.2.9", True), + ("^0.2.3", "0.3.0", False), + ("^0.2.3", "0.2.2", False), + ], + ) + def test_version_satisfies_caret(self, constraint_str: str, version_str: str, expected: bool) -> None: + """Caret constraints allow compatible updates within the same major (or minor for 0.x).""" + constraint = parse_constraint(constraint_str) + version = parse_version(version_str) + assert version_satisfies(version, constraint) == expected + + @pytest.mark.parametrize( + ("constraint_str", "version_str", "expected"), + [ + ("~1.2.3", "1.2.3", True), + ("~1.2.3", "1.2.9", True), + ("~1.2.3", "1.3.0", False), + ("~1.2.3", "1.2.2", False), + ], + ) + def test_version_satisfies_tilde(self, constraint_str: str, version_str: str, expected: bool) -> None: + """Tilde constraints allow patch-level updates only.""" + constraint = parse_constraint(constraint_str) + version = parse_version(version_str) + assert version_satisfies(version, constraint) == expected + + 
@pytest.mark.parametrize( + ("constraint_str", "version_str", "expected"), + [ + (">=1.0.0", "1.0.0", True), + (">=1.0.0", "0.9.9", False), + (">1.0.0", "1.0.1", True), + (">1.0.0", "1.0.0", False), + ("<=2.0.0", "2.0.0", True), + ("<=2.0.0", "2.0.1", False), + ("<2.0.0", "1.9.9", True), + ("<2.0.0", "2.0.0", False), + ("==1.0.0", "1.0.0", True), + ("==1.0.0", "1.0.1", False), + ("!=1.0.0", "1.0.1", True), + ("!=1.0.0", "1.0.0", False), + ], + ) + def test_version_satisfies_comparison_ops(self, constraint_str: str, version_str: str, expected: bool) -> None: + """Comparison operators (>=, >, <=, <, ==, !=) work correctly.""" + constraint = parse_constraint(constraint_str) + version = parse_version(version_str) + assert version_satisfies(version, constraint) == expected + + @pytest.mark.parametrize( + ("constraint_str", "version_str", "expected"), + [ + ("*", "1.0.0", True), + ("*", "99.99.99", True), + ("==1.*", "1.0.0", True), + ("==1.*", "1.9.9", True), + ("==1.*", "2.0.0", False), + ], + ) + def test_version_satisfies_wildcard(self, constraint_str: str, version_str: str, expected: bool) -> None: + """Wildcard constraints match any version (or within a major range).""" + constraint = parse_constraint(constraint_str) + version = parse_version(version_str) + assert version_satisfies(version, constraint) == expected + + @pytest.mark.parametrize( + ("constraint_str", "version_str", "expected"), + [ + (">=1.0.0,<2.0.0", "1.5.0", True), + (">=1.0.0,<2.0.0", "0.9.0", False), + (">=1.0.0,<2.0.0", "2.0.0", False), + ], + ) + def test_version_satisfies_compound(self, constraint_str: str, version_str: str, expected: bool) -> None: + """Compound constraints (AND of multiple sub-constraints) work correctly.""" + constraint = parse_constraint(constraint_str) + version = parse_version(version_str) + assert version_satisfies(version, constraint) == expected + + def test_version_satisfies_exact_no_operator(self) -> None: + """A bare version string (no operator) means exact match.""" + constraint = parse_constraint("1.0.0") + assert version_satisfies(parse_version("1.0.0"), constraint) is True + assert version_satisfies(parse_version("1.0.1"), constraint) is False + + @pytest.mark.parametrize( + ("tag", "expected_major", "expected_minor", "expected_patch"), + [ + ("v1.2.3", 1, 2, 3), + ("1.0.0", 1, 0, 0), + ], + ) + def test_parse_version_tag_valid(self, tag: str, expected_major: int, expected_minor: int, expected_patch: int) -> None: + """Valid semver tags (with or without v prefix) parse to Version.""" + result = parse_version_tag(tag) + assert result is not None + assert result.major == expected_major + assert result.minor == expected_minor + assert result.patch == expected_patch + + @pytest.mark.parametrize( + "tag", + [ + "release-20240101", + "latest", + ], + ) + def test_parse_version_tag_invalid(self, tag: str) -> None: + """Non-semver tags return None.""" + assert parse_version_tag(tag) is None + + def test_select_minimum_version(self) -> None: + """MVS returns the lowest version satisfying the constraint.""" + versions = [Version("1.0.0"), Version("1.1.0"), Version("1.2.0"), Version("2.0.0")] + constraint = SimpleSpec("^1.0.0") + result = select_minimum_version(versions, constraint) + assert result == Version("1.0.0") + + def test_select_minimum_version_skips_non_matching(self) -> None: + """MVS skips versions that don't satisfy the constraint.""" + versions = [Version("0.9.0"), Version("1.0.0"), Version("1.5.0")] + constraint = SimpleSpec(">=1.0.0") + result = select_minimum_version(versions, 
constraint) + assert result == Version("1.0.0") + + def test_select_minimum_version_no_match(self) -> None: + """MVS returns None when no version matches.""" + versions = [Version("1.0.0")] + constraint = SimpleSpec("^2.0.0") + result = select_minimum_version(versions, constraint) + assert result is None + + def test_select_minimum_version_empty_list(self) -> None: + """MVS returns None for an empty version list.""" + constraint = SimpleSpec("^1.0.0") + result = select_minimum_version([], constraint) + assert result is None + + def test_select_minimum_version_multiple_constraints(self) -> None: + """Multi-constraint MVS returns the lowest version satisfying all constraints.""" + versions = [Version("1.0.0"), Version("1.2.0"), Version("2.0.0")] + constraints = [SimpleSpec(">=1.0.0"), SimpleSpec(">=1.2.0")] + result = select_minimum_version_for_multiple_constraints(versions, constraints) + assert result == Version("1.2.0") + + def test_select_minimum_version_multiple_constraints_unsatisfiable(self) -> None: + """Multi-constraint MVS returns None when constraints are unsatisfiable together.""" + versions = [Version("1.0.0"), Version("2.0.0")] + constraints = [SimpleSpec(">=1.5.0"), SimpleSpec("<2.0.0")] + result = select_minimum_version_for_multiple_constraints(versions, constraints) + assert result is None diff --git a/tests/unit/pipelex/tools/test.mthds b/tests/unit/pipelex/tools/test.mthds new file mode 100644 index 000000000..7f227ffdf --- /dev/null +++ b/tests/unit/pipelex/tools/test.mthds @@ -0,0 +1,13 @@ + +domain = "test" + +[concept] +CodebaseFileContent = "The content of a codebase file" +FilePath = "The path of a file" + +[pipe.read_doc_file] +type = "PipeFunc" +description = "Read the content of related codebase files" +inputs = { related_file_paths = "FilePath" } +output = "CodebaseFileContent[]" +function_name = "read_file_content" diff --git a/tests/unit/pipelex/tools/test.plx b/tests/unit/pipelex/tools/test.plx deleted file mode 100644 index eab2abf75..000000000 --- a/tests/unit/pipelex/tools/test.plx +++ /dev/null @@ -1,14 +0,0 @@ - -domain = "test" - -[concept] -CodebaseFileContent = "The content of a codebase file" -FilePath = "The path of a file" - -[pipe.read_doc_file] -type = "PipeFunc" -description = "Read the content of related codebase files" -inputs = { related_file_paths = "FilePath" } -output = "CodebaseFileContent[]" -function_name = "read_file_content" - diff --git a/tests/unit/pipelex/tools/test_jinja2_required_variables.py b/tests/unit/pipelex/tools/test_jinja2_required_variables.py index 95f81b774..54b7c87eb 100644 --- a/tests/unit/pipelex/tools/test_jinja2_required_variables.py +++ b/tests/unit/pipelex/tools/test_jinja2_required_variables.py @@ -181,19 +181,19 @@ class TestData: ), ] - PLX_STYLE_TEMPLATES: ClassVar[list[tuple[str, str, set[str]]]] = [ + MTHDS_STYLE_TEMPLATES: ClassVar[list[tuple[str, str, set[str]]]] = [ ( - "plx_at_variable_preprocessed", + "mthds_at_variable_preprocessed", '{{ page.page_view|tag("page.page_view") }}', {"page.page_view"}, ), ( - "plx_dollar_variable_preprocessed", + "mthds_dollar_variable_preprocessed", "{{ page.text_and_images.text.text|format() }}", {"page.text_and_images.text.text"}, ), ( - "plx_mixed_preprocessed", + "mthds_mixed_preprocessed", '{{ page.page_view|tag("page.page_view") }}\n{{ page.text_and_images.text.text|format() }}', {"page.page_view", "page.text_and_images.text.text"}, ), @@ -343,15 +343,15 @@ def test_optional_variables( @pytest.mark.parametrize( ("topic", "template_source", "expected_paths"), - 
TestData.PLX_STYLE_TEMPLATES, + TestData.MTHDS_STYLE_TEMPLATES, ) - def test_plx_style_templates( + def test_mthds_style_templates( self, topic: str, template_source: str, expected_paths: set[str], ): - """Test detection in PLX-style preprocessed templates with tag/format filters.""" + """Test detection in MTHDS-style preprocessed templates with tag/format filters.""" result = detect_jinja2_required_variables( template_category=TemplateCategory.LLM_PROMPT, template_source=template_source, @@ -647,7 +647,7 @@ def test_same_variable_multiple_times_combines_filters(self) -> None: assert "upper" in result[0].filters def test_format_filter_detected(self) -> None: - """Test that format filter (common in PLX templates) is detected.""" + """Test that format filter (common in MTHDS templates) is detected.""" result = detect_jinja2_variable_references( template_category=TemplateCategory.LLM_PROMPT, template_source="{{ content|format() }}", diff --git a/uv.lock b/uv.lock index b510d23ad..236fdd334 100644 --- a/uv.lock +++ b/uv.lock @@ -223,6 +223,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/fb/76/641ae371508676492379f16e2fa48f4e2c11741bd63c48be4b12a6b09cba/aiosignal-1.4.0-py3-none-any.whl", hash = "sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e", size = 7490, upload-time = "2025-07-03T22:54:42.156Z" }, ] +[[package]] +name = "annotated-doc" +version = "0.0.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/57/ba/046ceea27344560984e26a590f90bc7f4a75b06701f653222458922b558c/annotated_doc-0.0.4.tar.gz", hash = "sha256:fbcda96e87e9c92ad167c2e53839e57503ecfda18804ea28102353485033faa4", size = 7288, upload-time = "2025-11-10T22:07:42.062Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/d3/26bf1008eb3d2daa8ef4cacc7f3bfdc11818d111f7e2d0201bc6e3b49d45/annotated_doc-0.0.4-py3-none-any.whl", hash = "sha256:571ac1dc6991c450b25a9c2d84a3705e2ae7a53467b5d111c24fa8baabbed320", size = 5303, upload-time = "2025-11-10T22:07:40.673Z" }, +] + [[package]] name = "annotated-types" version = "0.7.0" @@ -234,7 +243,7 @@ wheels = [ [[package]] name = "anthropic" -version = "0.79.0" +version = "0.81.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -246,9 +255,9 @@ dependencies = [ { name = "sniffio" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/15/b1/91aea3f8fd180d01d133d931a167a78a3737b3fd39ccef2ae8d6619c24fd/anthropic-0.79.0.tar.gz", hash = "sha256:8707aafb3b1176ed6c13e2b1c9fb3efddce90d17aee5d8b83a86c70dcdcca871", size = 509825, upload-time = "2026-02-07T18:06:18.388Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/c2/d2bb9b3c82c386abf3b2c32ae0452a8dcb89ed2809d875e1420bea22e318/anthropic-0.81.0.tar.gz", hash = "sha256:bab2d4e45c2e81a0668fdc2da2f7fd665ed8a0295ba3c86450f9dcc3a7804524", size = 532935, upload-time = "2026-02-18T04:00:54.658Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/95/b2/cc0b8e874a18d7da50b0fda8c99e4ac123f23bf47b471827c5f6f3e4a767/anthropic-0.79.0-py3-none-any.whl", hash = "sha256:04cbd473b6bbda4ca2e41dd670fe2f829a911530f01697d0a1e37321eb75f3cf", size = 405918, upload-time = "2026-02-07T18:06:20.246Z" }, + { url = "https://files.pythonhosted.org/packages/86/27/a18e1613da66b3c9c7565c92457a60de15e824a6dd2ed9bce0fbfe615ded/anthropic-0.81.0-py3-none-any.whl", hash = "sha256:ac54407e9a1f9b35e6e6c86f75bf403f0e54d60944f99f15f685a38d6829f20b", size = 455627, 
upload-time = "2026-02-18T04:00:53.207Z" }, ] [[package]] @@ -339,16 +348,16 @@ wheels = [ [[package]] name = "backrefs" -version = "6.1" +version = "6.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/86/e3/bb3a439d5cb255c4774724810ad8073830fac9c9dee123555820c1bcc806/backrefs-6.1.tar.gz", hash = "sha256:3bba1749aafe1db9b915f00e0dd166cba613b6f788ffd63060ac3485dc9be231", size = 7011962, upload-time = "2025-11-15T14:52:08.323Z" } +sdist = { url = "https://files.pythonhosted.org/packages/4e/a6/e325ec73b638d3ede4421b5445d4a0b8b219481826cc079d510100af356c/backrefs-6.2.tar.gz", hash = "sha256:f44ff4d48808b243b6c0cdc6231e22195c32f77046018141556c66f8bab72a49", size = 7012303, upload-time = "2026-02-16T19:10:15.828Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/3b/ee/c216d52f58ea75b5e1841022bbae24438b19834a29b163cb32aa3a2a7c6e/backrefs-6.1-py310-none-any.whl", hash = "sha256:2a2ccb96302337ce61ee4717ceacfbf26ba4efb1d55af86564b8bbaeda39cac1", size = 381059, upload-time = "2025-11-15T14:51:59.758Z" }, - { url = "https://files.pythonhosted.org/packages/e6/9a/8da246d988ded941da96c7ed945d63e94a445637eaad985a0ed88787cb89/backrefs-6.1-py311-none-any.whl", hash = "sha256:e82bba3875ee4430f4de4b6db19429a27275d95a5f3773c57e9e18abc23fd2b7", size = 392854, upload-time = "2025-11-15T14:52:01.194Z" }, - { url = "https://files.pythonhosted.org/packages/37/c9/fd117a6f9300c62bbc33bc337fd2b3c6bfe28b6e9701de336b52d7a797ad/backrefs-6.1-py312-none-any.whl", hash = "sha256:c64698c8d2269343d88947c0735cb4b78745bd3ba590e10313fbf3f78c34da5a", size = 398770, upload-time = "2025-11-15T14:52:02.584Z" }, - { url = "https://files.pythonhosted.org/packages/eb/95/7118e935b0b0bd3f94dfec2d852fd4e4f4f9757bdb49850519acd245cd3a/backrefs-6.1-py313-none-any.whl", hash = "sha256:4c9d3dc1e2e558965202c012304f33d4e0e477e1c103663fd2c3cc9bb18b0d05", size = 400726, upload-time = "2025-11-15T14:52:04.093Z" }, - { url = "https://files.pythonhosted.org/packages/1d/72/6296bad135bfafd3254ae3648cd152980a424bd6fed64a101af00cc7ba31/backrefs-6.1-py314-none-any.whl", hash = "sha256:13eafbc9ccd5222e9c1f0bec563e6d2a6d21514962f11e7fc79872fd56cbc853", size = 412584, upload-time = "2025-11-15T14:52:05.233Z" }, - { url = "https://files.pythonhosted.org/packages/02/e3/a4fa1946722c4c7b063cc25043a12d9ce9b4323777f89643be74cef2993c/backrefs-6.1-py39-none-any.whl", hash = "sha256:a9e99b8a4867852cad177a6430e31b0f6e495d65f8c6c134b68c14c3c95bf4b0", size = 381058, upload-time = "2025-11-15T14:52:06.698Z" }, + { url = "https://files.pythonhosted.org/packages/1b/39/3765df263e08a4df37f4f43cb5aa3c6c17a4bdd42ecfe841e04c26037171/backrefs-6.2-py310-none-any.whl", hash = "sha256:0fdc7b012420b6b144410342caeb8adc54c6866cf12064abc9bb211302e496f8", size = 381075, upload-time = "2026-02-16T19:10:04.322Z" }, + { url = "https://files.pythonhosted.org/packages/0f/f0/35240571e1b67ffb19dafb29ab34150b6f59f93f717b041082cdb1bfceb1/backrefs-6.2-py311-none-any.whl", hash = "sha256:08aa7fae530c6b2361d7bdcbda1a7c454e330cc9dbcd03f5c23205e430e5c3be", size = 392874, upload-time = "2026-02-16T19:10:06.314Z" }, + { url = "https://files.pythonhosted.org/packages/e3/63/77e8c9745b4d227cce9f5e0a6f68041278c5f9b18588b35905f5f19c1beb/backrefs-6.2-py312-none-any.whl", hash = "sha256:c3f4b9cb2af8cda0d87ab4f57800b57b95428488477be164dd2b47be54db0c90", size = 398787, upload-time = "2026-02-16T19:10:08.274Z" }, + { url = 
"https://files.pythonhosted.org/packages/c5/71/c754b1737ad99102e03fa3235acb6cb6d3ac9d6f596cbc3e5f236705abd8/backrefs-6.2-py313-none-any.whl", hash = "sha256:12df81596ab511f783b7d87c043ce26bc5b0288cf3bb03610fe76b8189282b2b", size = 400747, upload-time = "2026-02-16T19:10:09.791Z" }, + { url = "https://files.pythonhosted.org/packages/af/75/be12ba31a6eb20dccef2320cd8ccb3f7d9013b68ba4c70156259fee9e409/backrefs-6.2-py314-none-any.whl", hash = "sha256:e5f805ae09819caa1aa0623b4a83790e7028604aa2b8c73ba602c4454e665de7", size = 412602, upload-time = "2026-02-16T19:10:12.317Z" }, + { url = "https://files.pythonhosted.org/packages/21/f8/d02f650c47d05034dcd6f9c8cf94f39598b7a89c00ecda0ecb2911bc27e9/backrefs-6.2-py39-none-any.whl", hash = "sha256:664e33cd88c6840b7625b826ecf2555f32d491800900f5a541f772c485f7cda7", size = 381077, upload-time = "2026-02-16T19:10:13.74Z" }, ] [[package]] @@ -380,16 +389,16 @@ wheels = [ [[package]] name = "boto3-stubs" -version = "1.42.44" +version = "1.42.51" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "botocore-stubs" }, { name = "types-s3transfer" }, { name = "typing-extensions", marker = "python_full_version < '3.12'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/07/5e/e75fa05e1804ea08306463d9db9e9289916e8e335d162cb54f5903efbac8/boto3_stubs-1.42.44.tar.gz", hash = "sha256:e1e5a614582d9a04de47539356fe61e2dff53a271628d6e59d455a46ba25bf48", size = 100873, upload-time = "2026-02-06T20:40:24.515Z" } +sdist = { url = "https://files.pythonhosted.org/packages/1c/69/41759e8fd0b256da1c46a8ef8336265dd6f3c40e974ef9413bcded8e2603/boto3_stubs-1.42.51.tar.gz", hash = "sha256:0fdc9d7a43ec5790c0067cfa037f4018f9927c287fdf2f9cd5c9849173656ec0", size = 100532, upload-time = "2026-02-17T21:34:51.45Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/4c/1f/0f96d2a1e2de2ee35d17ed40322984da4778f47bed9a407fcb6f475cdcab/boto3_stubs-1.42.44-py3-none-any.whl", hash = "sha256:a917cad432dfa3ed0dca29f4ac7b44c21b9840810e8a7e64bed5249c65b6a2a0", size = 69782, upload-time = "2026-02-06T20:40:20.197Z" }, + { url = "https://files.pythonhosted.org/packages/44/9e/6a17847e54c42029892126a3d82b3a0122d41686ab75ab2647f6913c57bd/boto3_stubs-1.42.51-py3-none-any.whl", hash = "sha256:af6cad5fb5356ebacfdd7d40dd1a9ad352dae30de03e041991f8520d9a4c9900", size = 69587, upload-time = "2026-02-17T21:34:47.038Z" }, ] [[package]] @@ -642,101 +651,115 @@ wheels = [ [[package]] name = "coverage" -version = "7.13.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/11/43/3e4ac666cc35f231fa70c94e9f38459299de1a152813f9d2f60fc5f3ecaf/coverage-7.13.3.tar.gz", hash = "sha256:f7f6182d3dfb8802c1747eacbfe611b669455b69b7c037484bb1efbbb56711ac", size = 826832, upload-time = "2026-02-03T14:02:30.944Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ab/07/1c8099563a8a6c389a31c2d0aa1497cee86d6248bb4b9ba5e779215db9f9/coverage-7.13.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0b4f345f7265cdbdb5ec2521ffff15fa49de6d6c39abf89fc7ad68aa9e3a55f0", size = 219143, upload-time = "2026-02-03T13:59:40.459Z" }, - { url = "https://files.pythonhosted.org/packages/69/39/a892d44af7aa092cab70e0cc5cdbba18eeccfe1d6930695dab1742eef9e9/coverage-7.13.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:96c3be8bae9d0333e403cc1a8eb078a7f928b5650bae94a18fb4820cc993fb9b", size = 219663, upload-time = "2026-02-03T13:59:41.951Z" }, - { url = 
"https://files.pythonhosted.org/packages/9a/25/9669dcf4c2bb4c3861469e6db20e52e8c11908cf53c14ec9b12e9fd4d602/coverage-7.13.3-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d6f4a21328ea49d38565b55599e1c02834e76583a6953e5586d65cb1efebd8f8", size = 246424, upload-time = "2026-02-03T13:59:43.418Z" }, - { url = "https://files.pythonhosted.org/packages/f3/68/d9766c4e298aca62ea5d9543e1dd1e4e1439d7284815244d8b7db1840bfb/coverage-7.13.3-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fc970575799a9d17d5c3fafd83a0f6ccf5d5117cdc9ad6fbd791e9ead82418b0", size = 248228, upload-time = "2026-02-03T13:59:44.816Z" }, - { url = "https://files.pythonhosted.org/packages/f0/e2/eea6cb4a4bd443741adf008d4cccec83a1f75401df59b6559aca2bdd9710/coverage-7.13.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:87ff33b652b3556b05e204ae20793d1f872161b0fa5ec8a9ac76f8430e152ed6", size = 250103, upload-time = "2026-02-03T13:59:46.271Z" }, - { url = "https://files.pythonhosted.org/packages/db/77/664280ecd666c2191610842177e2fab9e5dbdeef97178e2078fed46a3d2c/coverage-7.13.3-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:7df8759ee57b9f3f7b66799b7660c282f4375bef620ade1686d6a7b03699e75f", size = 247107, upload-time = "2026-02-03T13:59:48.53Z" }, - { url = "https://files.pythonhosted.org/packages/2b/df/2a672eab99e0d0eba52d8a63e47dc92245eee26954d1b2d3c8f7d372151f/coverage-7.13.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f45c9bcb16bee25a798ccba8a2f6a1251b19de6a0d617bb365d7d2f386c4e20e", size = 248143, upload-time = "2026-02-03T13:59:50.027Z" }, - { url = "https://files.pythonhosted.org/packages/a5/dc/a104e7a87c13e57a358b8b9199a8955676e1703bb372d79722b54978ae45/coverage-7.13.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:318b2e4753cbf611061e01b6cc81477e1cdfeb69c36c4a14e6595e674caadb56", size = 246148, upload-time = "2026-02-03T13:59:52.025Z" }, - { url = "https://files.pythonhosted.org/packages/2b/89/e113d3a58dc20b03b7e59aed1e53ebc9ca6167f961876443e002b10e3ae9/coverage-7.13.3-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:24db3959de8ee394eeeca89ccb8ba25305c2da9a668dd44173394cbd5aa0777f", size = 246414, upload-time = "2026-02-03T13:59:53.859Z" }, - { url = "https://files.pythonhosted.org/packages/3f/60/a3fd0a6e8d89b488396019a2268b6a1f25ab56d6d18f3be50f35d77b47dc/coverage-7.13.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:be14d0622125edef21b3a4d8cd2d138c4872bf6e38adc90fd92385e3312f406a", size = 247023, upload-time = "2026-02-03T13:59:55.454Z" }, - { url = "https://files.pythonhosted.org/packages/19/fa/de4840bb939dbb22ba0648a6d8069fa91c9cf3b3fca8b0d1df461e885b3d/coverage-7.13.3-cp310-cp310-win32.whl", hash = "sha256:53be4aab8ddef18beb6188f3a3fdbf4d1af2277d098d4e618be3a8e6c88e74be", size = 221751, upload-time = "2026-02-03T13:59:57.383Z" }, - { url = "https://files.pythonhosted.org/packages/de/87/233ff8b7ef62fb63f58c78623b50bef69681111e0c4d43504f422d88cda4/coverage-7.13.3-cp310-cp310-win_amd64.whl", hash = "sha256:bfeee64ad8b4aae3233abb77eb6b52b51b05fa89da9645518671b9939a78732b", size = 222686, upload-time = "2026-02-03T13:59:58.825Z" }, - { url = "https://files.pythonhosted.org/packages/ec/09/1ac74e37cf45f17eb41e11a21854f7f92a4c2d6c6098ef4a1becb0c6d8d3/coverage-7.13.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5907605ee20e126eeee2abe14aae137043c2c8af2fa9b38d2ab3b7a6b8137f73", size = 219276, upload-time = 
"2026-02-03T14:00:00.296Z" }, - { url = "https://files.pythonhosted.org/packages/2e/cb/71908b08b21beb2c437d0d5870c4ec129c570ca1b386a8427fcdb11cf89c/coverage-7.13.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a88705500988c8acad8b8fd86c2a933d3aa96bec1ddc4bc5cb256360db7bbd00", size = 219776, upload-time = "2026-02-03T14:00:02.414Z" }, - { url = "https://files.pythonhosted.org/packages/09/85/c4f3dd69232887666a2c0394d4be21c60ea934d404db068e6c96aa59cd87/coverage-7.13.3-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:7bbb5aa9016c4c29e3432e087aa29ebee3f8fda089cfbfb4e6d64bd292dcd1c2", size = 250196, upload-time = "2026-02-03T14:00:04.197Z" }, - { url = "https://files.pythonhosted.org/packages/9c/cc/560ad6f12010344d0778e268df5ba9aa990aacccc310d478bf82bf3d302c/coverage-7.13.3-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:0c2be202a83dde768937a61cdc5d06bf9fb204048ca199d93479488e6247656c", size = 252111, upload-time = "2026-02-03T14:00:05.639Z" }, - { url = "https://files.pythonhosted.org/packages/f0/66/3193985fb2c58e91f94cfbe9e21a6fdf941e9301fe2be9e92c072e9c8f8c/coverage-7.13.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f45e32ef383ce56e0ca099b2e02fcdf7950be4b1b56afaab27b4ad790befe5b", size = 254217, upload-time = "2026-02-03T14:00:07.738Z" }, - { url = "https://files.pythonhosted.org/packages/c5/78/f0f91556bf1faa416792e537c523c5ef9db9b1d32a50572c102b3d7c45b3/coverage-7.13.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:6ed2e787249b922a93cd95c671cc9f4c9797a106e81b455c83a9ddb9d34590c0", size = 250318, upload-time = "2026-02-03T14:00:09.224Z" }, - { url = "https://files.pythonhosted.org/packages/6f/aa/fc654e45e837d137b2c1f3a2cc09b4aea1e8b015acd2f774fa0f3d2ddeba/coverage-7.13.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:05dd25b21afffe545e808265897c35f32d3e4437663923e0d256d9ab5031fb14", size = 251909, upload-time = "2026-02-03T14:00:10.712Z" }, - { url = "https://files.pythonhosted.org/packages/73/4d/ab53063992add8a9ca0463c9d92cce5994a29e17affd1c2daa091b922a93/coverage-7.13.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:46d29926349b5c4f1ea4fca95e8c892835515f3600995a383fa9a923b5739ea4", size = 249971, upload-time = "2026-02-03T14:00:12.402Z" }, - { url = "https://files.pythonhosted.org/packages/29/25/83694b81e46fcff9899694a1b6f57573429cdd82b57932f09a698f03eea5/coverage-7.13.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:fae6a21537519c2af00245e834e5bf2884699cc7c1055738fd0f9dc37a3644ad", size = 249692, upload-time = "2026-02-03T14:00:13.868Z" }, - { url = "https://files.pythonhosted.org/packages/d4/ef/d68fc304301f4cb4bf6aefa0045310520789ca38dabdfba9dbecd3f37919/coverage-7.13.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c672d4e2f0575a4ca2bf2aa0c5ced5188220ab806c1bb6d7179f70a11a017222", size = 250597, upload-time = "2026-02-03T14:00:15.461Z" }, - { url = "https://files.pythonhosted.org/packages/8d/85/240ad396f914df361d0f71e912ddcedb48130c71b88dc4193fe3c0306f00/coverage-7.13.3-cp311-cp311-win32.whl", hash = "sha256:fcda51c918c7a13ad93b5f89a58d56e3a072c9e0ba5c231b0ed81404bf2648fb", size = 221773, upload-time = "2026-02-03T14:00:17.462Z" }, - { url = "https://files.pythonhosted.org/packages/2f/71/165b3a6d3d052704a9ab52d11ea64ef3426745de517dda44d872716213a7/coverage-7.13.3-cp311-cp311-win_amd64.whl", hash = "sha256:d1a049b5c51b3b679928dd35e47c4a2235e0b6128b479a7596d0ef5b42fa6301", size = 
222711, upload-time = "2026-02-03T14:00:19.449Z" }, - { url = "https://files.pythonhosted.org/packages/51/d0/0ddc9c5934cdd52639c5df1f1eb0fdab51bb52348f3a8d1c7db9c600d93a/coverage-7.13.3-cp311-cp311-win_arm64.whl", hash = "sha256:79f2670c7e772f4917895c3d89aad59e01f3dbe68a4ed2d0373b431fad1dcfba", size = 221377, upload-time = "2026-02-03T14:00:20.968Z" }, - { url = "https://files.pythonhosted.org/packages/94/44/330f8e83b143f6668778ed61d17ece9dc48459e9e74669177de02f45fec5/coverage-7.13.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ed48b4170caa2c4420e0cd27dc977caaffc7eecc317355751df8373dddcef595", size = 219441, upload-time = "2026-02-03T14:00:22.585Z" }, - { url = "https://files.pythonhosted.org/packages/08/e7/29db05693562c2e65bdf6910c0af2fd6f9325b8f43caf7a258413f369e30/coverage-7.13.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8f2adf4bcffbbec41f366f2e6dffb9d24e8172d16e91da5799c9b7ed6b5716e6", size = 219801, upload-time = "2026-02-03T14:00:24.186Z" }, - { url = "https://files.pythonhosted.org/packages/90/ae/7f8a78249b02b0818db46220795f8ac8312ea4abd1d37d79ea81db5cae81/coverage-7.13.3-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:01119735c690786b6966a1e9f098da4cd7ca9174c4cfe076d04e653105488395", size = 251306, upload-time = "2026-02-03T14:00:25.798Z" }, - { url = "https://files.pythonhosted.org/packages/62/71/a18a53d1808e09b2e9ebd6b47dad5e92daf4c38b0686b4c4d1b2f3e42b7f/coverage-7.13.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8bb09e83c603f152d855f666d70a71765ca8e67332e5829e62cb9466c176af23", size = 254051, upload-time = "2026-02-03T14:00:27.474Z" }, - { url = "https://files.pythonhosted.org/packages/4a/0a/eb30f6455d04c5a3396d0696cad2df0269ae7444bb322f86ffe3376f7bf9/coverage-7.13.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b607a40cba795cfac6d130220d25962931ce101f2f478a29822b19755377fb34", size = 255160, upload-time = "2026-02-03T14:00:29.024Z" }, - { url = "https://files.pythonhosted.org/packages/7b/7e/a45baac86274ce3ed842dbb84f14560c673ad30535f397d89164ec56c5df/coverage-7.13.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:44f14a62f5da2e9aedf9080e01d2cda61df39197d48e323538ec037336d68da8", size = 251709, upload-time = "2026-02-03T14:00:30.641Z" }, - { url = "https://files.pythonhosted.org/packages/c0/df/dd0dc12f30da11349993f3e218901fdf82f45ee44773596050c8f5a1fb25/coverage-7.13.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:debf29e0b157769843dff0981cc76f79e0ed04e36bb773c6cac5f6029054bd8a", size = 253083, upload-time = "2026-02-03T14:00:32.14Z" }, - { url = "https://files.pythonhosted.org/packages/ab/32/fc764c8389a8ce95cb90eb97af4c32f392ab0ac23ec57cadeefb887188d3/coverage-7.13.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:824bb95cd71604031ae9a48edb91fd6effde669522f960375668ed21b36e3ec4", size = 251227, upload-time = "2026-02-03T14:00:34.721Z" }, - { url = "https://files.pythonhosted.org/packages/dd/ca/d025e9da8f06f24c34d2da9873957cfc5f7e0d67802c3e34d0caa8452130/coverage-7.13.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:8f1010029a5b52dc427c8e2a8dbddb2303ddd180b806687d1acd1bb1d06649e7", size = 250794, upload-time = "2026-02-03T14:00:36.278Z" }, - { url = "https://files.pythonhosted.org/packages/45/c7/76bf35d5d488ec8f68682eb8e7671acc50a6d2d1c1182de1d2b6d4ffad3b/coverage-7.13.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:cd5dee4fd7659d8306ffa79eeaaafd91fa30a302dac3af723b9b469e549247e0", size = 252671, upload-time = "2026-02-03T14:00:38.368Z" }, - { url = "https://files.pythonhosted.org/packages/bf/10/1921f1a03a7c209e1cb374f81a6b9b68b03cdb3ecc3433c189bc90e2a3d5/coverage-7.13.3-cp312-cp312-win32.whl", hash = "sha256:f7f153d0184d45f3873b3ad3ad22694fd73aadcb8cdbc4337ab4b41ea6b4dff1", size = 221986, upload-time = "2026-02-03T14:00:40.442Z" }, - { url = "https://files.pythonhosted.org/packages/3c/7c/f5d93297f8e125a80c15545edc754d93e0ed8ba255b65e609b185296af01/coverage-7.13.3-cp312-cp312-win_amd64.whl", hash = "sha256:03a6e5e1e50819d6d7436f5bc40c92ded7e484e400716886ac921e35c133149d", size = 222793, upload-time = "2026-02-03T14:00:42.106Z" }, - { url = "https://files.pythonhosted.org/packages/43/59/c86b84170015b4555ebabca8649bdf9f4a1f737a73168088385ed0f947c4/coverage-7.13.3-cp312-cp312-win_arm64.whl", hash = "sha256:51c4c42c0e7d09a822b08b6cf79b3c4db8333fffde7450da946719ba0d45730f", size = 221410, upload-time = "2026-02-03T14:00:43.726Z" }, - { url = "https://files.pythonhosted.org/packages/81/f3/4c333da7b373e8c8bfb62517e8174a01dcc373d7a9083698e3b39d50d59c/coverage-7.13.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:853c3d3c79ff0db65797aad79dee6be020efd218ac4510f15a205f1e8d13ce25", size = 219468, upload-time = "2026-02-03T14:00:45.829Z" }, - { url = "https://files.pythonhosted.org/packages/d6/31/0714337b7d23630c8de2f4d56acf43c65f8728a45ed529b34410683f7217/coverage-7.13.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f75695e157c83d374f88dcc646a60cb94173304a9258b2e74ba5a66b7614a51a", size = 219839, upload-time = "2026-02-03T14:00:47.407Z" }, - { url = "https://files.pythonhosted.org/packages/12/99/bd6f2a2738144c98945666f90cae446ed870cecf0421c767475fcf42cdbe/coverage-7.13.3-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:2d098709621d0819039f3f1e471ee554f55a0b2ac0d816883c765b14129b5627", size = 250828, upload-time = "2026-02-03T14:00:49.029Z" }, - { url = "https://files.pythonhosted.org/packages/6f/99/97b600225fbf631e6f5bfd3ad5bcaf87fbb9e34ff87492e5a572ff01bbe2/coverage-7.13.3-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:16d23d6579cf80a474ad160ca14d8b319abaa6db62759d6eef53b2fc979b58c8", size = 253432, upload-time = "2026-02-03T14:00:50.655Z" }, - { url = "https://files.pythonhosted.org/packages/5f/5c/abe2b3490bda26bd4f5e3e799be0bdf00bd81edebedc2c9da8d3ef288fa8/coverage-7.13.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:00d34b29a59d2076e6f318b30a00a69bf63687e30cd882984ed444e753990cc1", size = 254672, upload-time = "2026-02-03T14:00:52.757Z" }, - { url = "https://files.pythonhosted.org/packages/31/ba/5d1957c76b40daff53971fe0adb84d9c2162b614280031d1d0653dd010c1/coverage-7.13.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ab6d72bffac9deb6e6cb0f61042e748de3f9f8e98afb0375a8e64b0b6e11746b", size = 251050, upload-time = "2026-02-03T14:00:54.332Z" }, - { url = "https://files.pythonhosted.org/packages/69/dc/dffdf3bfe9d32090f047d3c3085378558cb4eb6778cda7de414ad74581ed/coverage-7.13.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e129328ad1258e49cae0123a3b5fcb93d6c2fa90d540f0b4c7cdcdc019aaa3dc", size = 252801, upload-time = "2026-02-03T14:00:56.121Z" }, - { url = "https://files.pythonhosted.org/packages/87/51/cdf6198b0f2746e04511a30dc9185d7b8cdd895276c07bdb538e37f1cd50/coverage-7.13.3-cp313-cp313-musllinux_1_2_i686.whl", 
hash = "sha256:2213a8d88ed35459bda71597599d4eec7c2ebad201c88f0bfc2c26fd9b0dd2ea", size = 250763, upload-time = "2026-02-03T14:00:58.719Z" }, - { url = "https://files.pythonhosted.org/packages/d7/1a/596b7d62218c1d69f2475b69cc6b211e33c83c902f38ee6ae9766dd422da/coverage-7.13.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:00dd3f02de6d5f5c9c3d95e3e036c3c2e2a669f8bf2d3ceb92505c4ce7838f67", size = 250587, upload-time = "2026-02-03T14:01:01.197Z" }, - { url = "https://files.pythonhosted.org/packages/f7/46/52330d5841ff660f22c130b75f5e1dd3e352c8e7baef5e5fef6b14e3e991/coverage-7.13.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f9bada7bc660d20b23d7d312ebe29e927b655cf414dadcdb6335a2075695bd86", size = 252358, upload-time = "2026-02-03T14:01:02.824Z" }, - { url = "https://files.pythonhosted.org/packages/36/8a/e69a5be51923097ba7d5cff9724466e74fe486e9232020ba97c809a8b42b/coverage-7.13.3-cp313-cp313-win32.whl", hash = "sha256:75b3c0300f3fa15809bd62d9ca8b170eb21fcf0100eb4b4154d6dc8b3a5bbd43", size = 222007, upload-time = "2026-02-03T14:01:04.876Z" }, - { url = "https://files.pythonhosted.org/packages/0a/09/a5a069bcee0d613bdd48ee7637fa73bc09e7ed4342b26890f2df97cc9682/coverage-7.13.3-cp313-cp313-win_amd64.whl", hash = "sha256:a2f7589c6132c44c53f6e705e1a6677e2b7821378c22f7703b2cf5388d0d4587", size = 222812, upload-time = "2026-02-03T14:01:07.296Z" }, - { url = "https://files.pythonhosted.org/packages/3d/4f/d62ad7dfe32f9e3d4a10c178bb6f98b10b083d6e0530ca202b399371f6c1/coverage-7.13.3-cp313-cp313-win_arm64.whl", hash = "sha256:123ceaf2b9d8c614f01110f908a341e05b1b305d6b2ada98763b9a5a59756051", size = 221433, upload-time = "2026-02-03T14:01:09.156Z" }, - { url = "https://files.pythonhosted.org/packages/04/b2/4876c46d723d80b9c5b695f1a11bf5f7c3dabf540ec00d6edc076ff025e6/coverage-7.13.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:cc7fd0f726795420f3678ac82ff882c7fc33770bd0074463b5aef7293285ace9", size = 220162, upload-time = "2026-02-03T14:01:11.409Z" }, - { url = "https://files.pythonhosted.org/packages/fc/04/9942b64a0e0bdda2c109f56bda42b2a59d9d3df4c94b85a323c1cae9fc77/coverage-7.13.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d358dc408edc28730aed5477a69338e444e62fba0b7e9e4a131c505fadad691e", size = 220510, upload-time = "2026-02-03T14:01:13.038Z" }, - { url = "https://files.pythonhosted.org/packages/5a/82/5cfe1e81eae525b74669f9795f37eb3edd4679b873d79d1e6c1c14ee6c1c/coverage-7.13.3-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5d67b9ed6f7b5527b209b24b3df9f2e5bf0198c1bbf99c6971b0e2dcb7e2a107", size = 261801, upload-time = "2026-02-03T14:01:14.674Z" }, - { url = "https://files.pythonhosted.org/packages/0b/ec/a553d7f742fd2cd12e36a16a7b4b3582d5934b496ef2b5ea8abeb10903d4/coverage-7.13.3-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:59224bfb2e9b37c1335ae35d00daa3a5b4e0b1a20f530be208fff1ecfa436f43", size = 263882, upload-time = "2026-02-03T14:01:16.343Z" }, - { url = "https://files.pythonhosted.org/packages/e1/58/8f54a2a93e3d675635bc406de1c9ac8d551312142ff52c9d71b5e533ad45/coverage-7.13.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ae9306b5299e31e31e0d3b908c66bcb6e7e3ddca143dea0266e9ce6c667346d3", size = 266306, upload-time = "2026-02-03T14:01:18.02Z" }, - { url = 
"https://files.pythonhosted.org/packages/1a/be/e593399fd6ea1f00aee79ebd7cc401021f218d34e96682a92e1bae092ff6/coverage-7.13.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:343aaeb5f8bb7bcd38620fd7bc56e6ee8207847d8c6103a1e7b72322d381ba4a", size = 261051, upload-time = "2026-02-03T14:01:19.757Z" }, - { url = "https://files.pythonhosted.org/packages/5c/e5/e9e0f6138b21bcdebccac36fbfde9cf15eb1bbcea9f5b1f35cd1f465fb91/coverage-7.13.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b2182129f4c101272ff5f2f18038d7b698db1bf8e7aa9e615cb48440899ad32e", size = 263868, upload-time = "2026-02-03T14:01:21.487Z" }, - { url = "https://files.pythonhosted.org/packages/9a/bf/de72cfebb69756f2d4a2dde35efcc33c47d85cd3ebdf844b3914aac2ef28/coverage-7.13.3-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:94d2ac94bd0cc57c5626f52f8c2fffed1444b5ae8c9fc68320306cc2b255e155", size = 261498, upload-time = "2026-02-03T14:01:23.097Z" }, - { url = "https://files.pythonhosted.org/packages/f2/91/4a2d313a70fc2e98ca53afd1c8ce67a89b1944cd996589a5b1fe7fbb3e5c/coverage-7.13.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:65436cde5ecabe26fb2f0bf598962f0a054d3f23ad529361326ac002c61a2a1e", size = 260394, upload-time = "2026-02-03T14:01:24.949Z" }, - { url = "https://files.pythonhosted.org/packages/40/83/25113af7cf6941e779eb7ed8de2a677865b859a07ccee9146d4cc06a03e3/coverage-7.13.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:db83b77f97129813dbd463a67e5335adc6a6a91db652cc085d60c2d512746f96", size = 262579, upload-time = "2026-02-03T14:01:26.703Z" }, - { url = "https://files.pythonhosted.org/packages/1e/19/a5f2b96262977e82fb9aabbe19b4d83561f5d063f18dde3e72f34ffc3b2f/coverage-7.13.3-cp313-cp313t-win32.whl", hash = "sha256:dfb428e41377e6b9ba1b0a32df6db5409cb089a0ed1d0a672dc4953ec110d84f", size = 222679, upload-time = "2026-02-03T14:01:28.553Z" }, - { url = "https://files.pythonhosted.org/packages/81/82/ef1747b88c87a5c7d7edc3704799ebd650189a9158e680a063308b6125ef/coverage-7.13.3-cp313-cp313t-win_amd64.whl", hash = "sha256:5badd7e596e6b0c89aa8ec6d37f4473e4357f982ce57f9a2942b0221cd9cf60c", size = 223740, upload-time = "2026-02-03T14:01:30.776Z" }, - { url = "https://files.pythonhosted.org/packages/1c/4c/a67c7bb5b560241c22736a9cb2f14c5034149ffae18630323fde787339e4/coverage-7.13.3-cp313-cp313t-win_arm64.whl", hash = "sha256:989aa158c0eb19d83c76c26f4ba00dbb272485c56e452010a3450bdbc9daafd9", size = 221996, upload-time = "2026-02-03T14:01:32.495Z" }, - { url = "https://files.pythonhosted.org/packages/5e/b3/677bb43427fed9298905106f39c6520ac75f746f81b8f01104526a8026e4/coverage-7.13.3-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:c6f6169bbdbdb85aab8ac0392d776948907267fcc91deeacf6f9d55f7a83ae3b", size = 219513, upload-time = "2026-02-03T14:01:34.29Z" }, - { url = "https://files.pythonhosted.org/packages/42/53/290046e3bbf8986cdb7366a42dab3440b9983711eaff044a51b11006c67b/coverage-7.13.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:2f5e731627a3d5ef11a2a35aa0c6f7c435867c7ccbc391268eb4f2ca5dbdcc10", size = 219850, upload-time = "2026-02-03T14:01:35.984Z" }, - { url = "https://files.pythonhosted.org/packages/ea/2b/ab41f10345ba2e49d5e299be8663be2b7db33e77ac1b85cd0af985ea6406/coverage-7.13.3-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9db3a3285d91c0b70fab9f39f0a4aa37d375873677efe4e71e58d8321e8c5d39", size = 250886, upload-time = "2026-02-03T14:01:38.287Z" }, - { url = 
"https://files.pythonhosted.org/packages/72/2d/b3f6913ee5a1d5cdd04106f257e5fac5d048992ffc2d9995d07b0f17739f/coverage-7.13.3-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:06e49c5897cb12e3f7ecdc111d44e97c4f6d0557b81a7a0204ed70a8b038f86f", size = 253393, upload-time = "2026-02-03T14:01:40.118Z" }, - { url = "https://files.pythonhosted.org/packages/f0/f6/b1f48810ffc6accf49a35b9943636560768f0812330f7456aa87dc39aff5/coverage-7.13.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fb25061a66802df9fc13a9ba1967d25faa4dae0418db469264fd9860a921dde4", size = 254740, upload-time = "2026-02-03T14:01:42.413Z" }, - { url = "https://files.pythonhosted.org/packages/57/d0/e59c54f9be0b61808f6bc4c8c4346bd79f02dd6bbc3f476ef26124661f20/coverage-7.13.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:99fee45adbb1caeb914da16f70e557fb7ff6ddc9e4b14de665bd41af631367ef", size = 250905, upload-time = "2026-02-03T14:01:44.163Z" }, - { url = "https://files.pythonhosted.org/packages/d5/f7/5291bcdf498bafbee3796bb32ef6966e9915aebd4d0954123c8eae921c32/coverage-7.13.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:318002f1fd819bdc1651c619268aa5bc853c35fa5cc6d1e8c96bd9cd6c828b75", size = 252753, upload-time = "2026-02-03T14:01:45.974Z" }, - { url = "https://files.pythonhosted.org/packages/a0/a9/1dcafa918c281554dae6e10ece88c1add82db685be123e1b05c2056ff3fb/coverage-7.13.3-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:71295f2d1d170b9977dc386d46a7a1b7cbb30e5405492529b4c930113a33f895", size = 250716, upload-time = "2026-02-03T14:01:48.844Z" }, - { url = "https://files.pythonhosted.org/packages/44/bb/4ea4eabcce8c4f6235df6e059fbc5db49107b24c4bdffc44aee81aeca5a8/coverage-7.13.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:5b1ad2e0dc672625c44bc4fe34514602a9fd8b10d52ddc414dc585f74453516c", size = 250530, upload-time = "2026-02-03T14:01:50.793Z" }, - { url = "https://files.pythonhosted.org/packages/6d/31/4a6c9e6a71367e6f923b27b528448c37f4e959b7e4029330523014691007/coverage-7.13.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b2beb64c145593a50d90db5c7178f55daeae129123b0d265bdb3cbec83e5194a", size = 252186, upload-time = "2026-02-03T14:01:52.607Z" }, - { url = "https://files.pythonhosted.org/packages/27/92/e1451ef6390a4f655dc42da35d9971212f7abbbcad0bdb7af4407897eb76/coverage-7.13.3-cp314-cp314-win32.whl", hash = "sha256:3d1aed4f4e837a832df2f3b4f68a690eede0de4560a2dbc214ea0bc55aabcdb4", size = 222253, upload-time = "2026-02-03T14:01:55.071Z" }, - { url = "https://files.pythonhosted.org/packages/8a/98/78885a861a88de020c32a2693487c37d15a9873372953f0c3c159d575a43/coverage-7.13.3-cp314-cp314-win_amd64.whl", hash = "sha256:9f9efbbaf79f935d5fbe3ad814825cbce4f6cdb3054384cb49f0c0f496125fa0", size = 223069, upload-time = "2026-02-03T14:01:56.95Z" }, - { url = "https://files.pythonhosted.org/packages/eb/fb/3784753a48da58a5337972abf7ca58b1fb0f1bda21bc7b4fae992fd28e47/coverage-7.13.3-cp314-cp314-win_arm64.whl", hash = "sha256:31b6e889c53d4e6687ca63706148049494aace140cffece1c4dc6acadb70a7b3", size = 221633, upload-time = "2026-02-03T14:01:58.758Z" }, - { url = "https://files.pythonhosted.org/packages/40/f9/75b732d9674d32cdbffe801ed5f770786dd1c97eecedef2125b0d25102dc/coverage-7.13.3-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:c5e9787cec750793a19a28df7edd85ac4e49d3fb91721afcdc3b86f6c08d9aa8", size = 220243, upload-time = "2026-02-03T14:02:01.109Z" }, - { url = 
"https://files.pythonhosted.org/packages/cf/7e/2868ec95de5a65703e6f0c87407ea822d1feb3619600fbc3c1c4fa986090/coverage-7.13.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:e5b86db331c682fd0e4be7098e6acee5e8a293f824d41487c667a93705d415ca", size = 220515, upload-time = "2026-02-03T14:02:02.862Z" }, - { url = "https://files.pythonhosted.org/packages/7d/eb/9f0d349652fced20bcaea0f67fc5777bd097c92369f267975732f3dc5f45/coverage-7.13.3-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:edc7754932682d52cf6e7a71806e529ecd5ce660e630e8bd1d37109a2e5f63ba", size = 261874, upload-time = "2026-02-03T14:02:04.727Z" }, - { url = "https://files.pythonhosted.org/packages/ee/a5/6619bc4a6c7b139b16818149a3e74ab2e21599ff9a7b6811b6afde99f8ec/coverage-7.13.3-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d3a16d6398666510a6886f67f43d9537bfd0e13aca299688a19daa84f543122f", size = 264004, upload-time = "2026-02-03T14:02:06.634Z" }, - { url = "https://files.pythonhosted.org/packages/29/b7/90aa3fc645a50c6f07881fca4fd0ba21e3bfb6ce3a7078424ea3a35c74c9/coverage-7.13.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:303d38b19626c1981e1bb067a9928236d88eb0e4479b18a74812f05a82071508", size = 266408, upload-time = "2026-02-03T14:02:09.037Z" }, - { url = "https://files.pythonhosted.org/packages/62/55/08bb2a1e4dcbae384e638f0effef486ba5987b06700e481691891427d879/coverage-7.13.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:284e06eadfe15ddfee2f4ee56631f164ef897a7d7d5a15bca5f0bb88889fc5ba", size = 260977, upload-time = "2026-02-03T14:02:11.755Z" }, - { url = "https://files.pythonhosted.org/packages/9b/76/8bd4ae055a42d8fb5dd2230e5cf36ff2e05f85f2427e91b11a27fea52ed7/coverage-7.13.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:d401f0864a1d3198422816878e4e84ca89ec1c1bf166ecc0ae01380a39b888cd", size = 263868, upload-time = "2026-02-03T14:02:13.565Z" }, - { url = "https://files.pythonhosted.org/packages/e3/f9/ba000560f11e9e32ec03df5aa8477242c2d95b379c99ac9a7b2e7fbacb1a/coverage-7.13.3-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:3f379b02c18a64de78c4ccdddf1c81c2c5ae1956c72dacb9133d7dd7809794ab", size = 261474, upload-time = "2026-02-03T14:02:16.069Z" }, - { url = "https://files.pythonhosted.org/packages/90/4b/4de4de8f9ca7af4733bfcf4baa440121b7dbb3856daf8428ce91481ff63b/coverage-7.13.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:7a482f2da9086971efb12daca1d6547007ede3674ea06e16d7663414445c683e", size = 260317, upload-time = "2026-02-03T14:02:17.996Z" }, - { url = "https://files.pythonhosted.org/packages/05/71/5cd8436e2c21410ff70be81f738c0dddea91bcc3189b1517d26e0102ccb3/coverage-7.13.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:562136b0d401992118d9b49fbee5454e16f95f85b120a4226a04d816e33fe024", size = 262635, upload-time = "2026-02-03T14:02:20.405Z" }, - { url = "https://files.pythonhosted.org/packages/e7/f8/2834bb45bdd70b55a33ec354b8b5f6062fc90e5bb787e14385903a979503/coverage-7.13.3-cp314-cp314t-win32.whl", hash = "sha256:ca46e5c3be3b195098dd88711890b8011a9fa4feca942292bb84714ce5eab5d3", size = 223035, upload-time = "2026-02-03T14:02:22.323Z" }, - { url = "https://files.pythonhosted.org/packages/26/75/f8290f0073c00d9ae14056d2b84ab92dff21d5370e464cb6cb06f52bf580/coverage-7.13.3-cp314-cp314t-win_amd64.whl", hash = "sha256:06d316dbb3d9fd44cca05b2dbcfbef22948493d63a1f28e828d43e6cc505fed8", size = 224142, upload-time = 
"2026-02-03T14:02:24.143Z" }, - { url = "https://files.pythonhosted.org/packages/03/01/43ac78dfea8946c4a9161bbc034b5549115cb2b56781a4b574927f0d141a/coverage-7.13.3-cp314-cp314t-win_arm64.whl", hash = "sha256:299d66e9218193f9dc6e4880629ed7c4cd23486005166247c283fb98531656c3", size = 222166, upload-time = "2026-02-03T14:02:26.005Z" }, - { url = "https://files.pythonhosted.org/packages/7d/fb/70af542d2d938c778c9373ce253aa4116dbe7c0a5672f78b2b2ae0e1b94b/coverage-7.13.3-py3-none-any.whl", hash = "sha256:90a8af9dba6429b2573199622d72e0ebf024d6276f16abce394ad4d181bb0910", size = 211237, upload-time = "2026-02-03T14:02:27.986Z" }, +version = "7.13.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/24/56/95b7e30fa389756cb56630faa728da46a27b8c6eb46f9d557c68fff12b65/coverage-7.13.4.tar.gz", hash = "sha256:e5c8f6ed1e61a8b2dcdf31eb0b9bbf0130750ca79c1c49eb898e2ad86f5ccc91", size = 827239, upload-time = "2026-02-09T12:59:03.86Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/44/d4/7827d9ffa34d5d4d752eec907022aa417120936282fc488306f5da08c292/coverage-7.13.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0fc31c787a84f8cd6027eba44010517020e0d18487064cd3d8968941856d1415", size = 219152, upload-time = "2026-02-09T12:56:11.974Z" }, + { url = "https://files.pythonhosted.org/packages/35/b0/d69df26607c64043292644dbb9dc54b0856fabaa2cbb1eeee3331cc9e280/coverage-7.13.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a32ebc02a1805adf637fc8dec324b5cdacd2e493515424f70ee33799573d661b", size = 219667, upload-time = "2026-02-09T12:56:13.33Z" }, + { url = "https://files.pythonhosted.org/packages/82/a4/c1523f7c9e47b2271dbf8c2a097e7a1f89ef0d66f5840bb59b7e8814157b/coverage-7.13.4-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:e24f9156097ff9dc286f2f913df3a7f63c0e333dcafa3c196f2c18b4175ca09a", size = 246425, upload-time = "2026-02-09T12:56:14.552Z" }, + { url = "https://files.pythonhosted.org/packages/f8/02/aa7ec01d1a5023c4b680ab7257f9bfde9defe8fdddfe40be096ac19e8177/coverage-7.13.4-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8041b6c5bfdc03257666e9881d33b1abc88daccaf73f7b6340fb7946655cd10f", size = 248229, upload-time = "2026-02-09T12:56:16.31Z" }, + { url = "https://files.pythonhosted.org/packages/35/98/85aba0aed5126d896162087ef3f0e789a225697245256fc6181b95f47207/coverage-7.13.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2a09cfa6a5862bc2fc6ca7c3def5b2926194a56b8ab78ffcf617d28911123012", size = 250106, upload-time = "2026-02-09T12:56:18.024Z" }, + { url = "https://files.pythonhosted.org/packages/96/72/1db59bd67494bc162e3e4cd5fbc7edba2c7026b22f7c8ef1496d58c2b94c/coverage-7.13.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:296f8b0af861d3970c2a4d8c91d48eb4dd4771bcef9baedec6a9b515d7de3def", size = 252021, upload-time = "2026-02-09T12:56:19.272Z" }, + { url = "https://files.pythonhosted.org/packages/9d/97/72899c59c7066961de6e3daa142d459d47d104956db43e057e034f015c8a/coverage-7.13.4-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e101609bcbbfb04605ea1027b10dc3735c094d12d40826a60f897b98b1c30256", size = 247114, upload-time = "2026-02-09T12:56:21.051Z" }, + { url = 
"https://files.pythonhosted.org/packages/39/1f/f1885573b5970235e908da4389176936c8933e86cb316b9620aab1585fa2/coverage-7.13.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:aa3feb8db2e87ff5e6d00d7e1480ae241876286691265657b500886c98f38bda", size = 248143, upload-time = "2026-02-09T12:56:22.585Z" }, + { url = "https://files.pythonhosted.org/packages/a8/cf/e80390c5b7480b722fa3e994f8202807799b85bc562aa4f1dde209fbb7be/coverage-7.13.4-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:4fc7fa81bbaf5a02801b65346c8b3e657f1d93763e58c0abdf7c992addd81a92", size = 246152, upload-time = "2026-02-09T12:56:23.748Z" }, + { url = "https://files.pythonhosted.org/packages/44/bf/f89a8350d85572f95412debb0fb9bb4795b1d5b5232bd652923c759e787b/coverage-7.13.4-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:33901f604424145c6e9c2398684b92e176c0b12df77d52db81c20abd48c3794c", size = 249959, upload-time = "2026-02-09T12:56:25.209Z" }, + { url = "https://files.pythonhosted.org/packages/f7/6e/612a02aece8178c818df273e8d1642190c4875402ca2ba74514394b27aba/coverage-7.13.4-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:bb28c0f2cf2782508a40cec377935829d5fcc3ad9a3681375af4e84eb34b6b58", size = 246416, upload-time = "2026-02-09T12:56:26.475Z" }, + { url = "https://files.pythonhosted.org/packages/cb/98/b5afc39af67c2fa6786b03c3a7091fc300947387ce8914b096db8a73d67a/coverage-7.13.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9d107aff57a83222ddbd8d9ee705ede2af2cc926608b57abed8ef96b50b7e8f9", size = 247025, upload-time = "2026-02-09T12:56:27.727Z" }, + { url = "https://files.pythonhosted.org/packages/51/30/2bba8ef0682d5bd210c38fe497e12a06c9f8d663f7025e9f5c2c31ce847d/coverage-7.13.4-cp310-cp310-win32.whl", hash = "sha256:a6f94a7d00eb18f1b6d403c91a88fd58cfc92d4b16080dfdb774afc8294469bf", size = 221758, upload-time = "2026-02-09T12:56:29.051Z" }, + { url = "https://files.pythonhosted.org/packages/78/13/331f94934cf6c092b8ea59ff868eb587bc8fe0893f02c55bc6c0183a192e/coverage-7.13.4-cp310-cp310-win_amd64.whl", hash = "sha256:2cb0f1e000ebc419632bbe04366a8990b6e32c4e0b51543a6484ffe15eaeda95", size = 222693, upload-time = "2026-02-09T12:56:30.366Z" }, + { url = "https://files.pythonhosted.org/packages/b4/ad/b59e5b451cf7172b8d1043dc0fa718f23aab379bc1521ee13d4bd9bfa960/coverage-7.13.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d490ba50c3f35dd7c17953c68f3270e7ccd1c6642e2d2afe2d8e720b98f5a053", size = 219278, upload-time = "2026-02-09T12:56:31.673Z" }, + { url = "https://files.pythonhosted.org/packages/f1/17/0cb7ca3de72e5f4ef2ec2fa0089beafbcaaaead1844e8b8a63d35173d77d/coverage-7.13.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:19bc3c88078789f8ef36acb014d7241961dbf883fd2533d18cb1e7a5b4e28b11", size = 219783, upload-time = "2026-02-09T12:56:33.104Z" }, + { url = "https://files.pythonhosted.org/packages/ab/63/325d8e5b11e0eaf6d0f6a44fad444ae58820929a9b0de943fa377fe73e85/coverage-7.13.4-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3998e5a32e62fdf410c0dbd3115df86297995d6e3429af80b8798aad894ca7aa", size = 250200, upload-time = "2026-02-09T12:56:34.474Z" }, + { url = "https://files.pythonhosted.org/packages/76/53/c16972708cbb79f2942922571a687c52bd109a7bd51175aeb7558dff2236/coverage-7.13.4-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8e264226ec98e01a8e1054314af91ee6cde0eacac4f465cc93b03dbe0bce2fd7", size = 252114, upload-time = "2026-02-09T12:56:35.749Z" }, + { url = 
"https://files.pythonhosted.org/packages/eb/c2/7ab36d8b8cc412bec9ea2d07c83c48930eb4ba649634ba00cb7e4e0f9017/coverage-7.13.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a3aa4e7b9e416774b21797365b358a6e827ffadaaca81b69ee02946852449f00", size = 254220, upload-time = "2026-02-09T12:56:37.796Z" }, + { url = "https://files.pythonhosted.org/packages/d6/4d/cf52c9a3322c89a0e6febdfbc83bb45c0ed3c64ad14081b9503adee702e7/coverage-7.13.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:71ca20079dd8f27fcf808817e281e90220475cd75115162218d0e27549f95fef", size = 256164, upload-time = "2026-02-09T12:56:39.016Z" }, + { url = "https://files.pythonhosted.org/packages/78/e9/eb1dd17bd6de8289df3580e967e78294f352a5df8a57ff4671ee5fc3dcd0/coverage-7.13.4-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e2f25215f1a359ab17320b47bcdaca3e6e6356652e8256f2441e4ef972052903", size = 250325, upload-time = "2026-02-09T12:56:40.668Z" }, + { url = "https://files.pythonhosted.org/packages/71/07/8c1542aa873728f72267c07278c5cc0ec91356daf974df21335ccdb46368/coverage-7.13.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d65b2d373032411e86960604dc4edac91fdfb5dca539461cf2cbe78327d1e64f", size = 251913, upload-time = "2026-02-09T12:56:41.97Z" }, + { url = "https://files.pythonhosted.org/packages/74/d7/c62e2c5e4483a748e27868e4c32ad3daa9bdddbba58e1bc7a15e252baa74/coverage-7.13.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94eb63f9b363180aff17de3e7c8760c3ba94664ea2695c52f10111244d16a299", size = 249974, upload-time = "2026-02-09T12:56:43.323Z" }, + { url = "https://files.pythonhosted.org/packages/98/9f/4c5c015a6e98ced54efd0f5cf8d31b88e5504ecb6857585fc0161bb1e600/coverage-7.13.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e856bf6616714c3a9fbc270ab54103f4e685ba236fa98c054e8f87f266c93505", size = 253741, upload-time = "2026-02-09T12:56:45.155Z" }, + { url = "https://files.pythonhosted.org/packages/bd/59/0f4eef89b9f0fcd9633b5d350016f54126ab49426a70ff4c4e87446cabdc/coverage-7.13.4-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:65dfcbe305c3dfe658492df2d85259e0d79ead4177f9ae724b6fb245198f55d6", size = 249695, upload-time = "2026-02-09T12:56:46.636Z" }, + { url = "https://files.pythonhosted.org/packages/b5/2c/b7476f938deb07166f3eb281a385c262675d688ff4659ad56c6c6b8e2e70/coverage-7.13.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b507778ae8a4c915436ed5c2e05b4a6cecfa70f734e19c22a005152a11c7b6a9", size = 250599, upload-time = "2026-02-09T12:56:48.13Z" }, + { url = "https://files.pythonhosted.org/packages/b8/34/c3420709d9846ee3785b9f2831b4d94f276f38884032dca1457fa83f7476/coverage-7.13.4-cp311-cp311-win32.whl", hash = "sha256:784fc3cf8be001197b652d51d3fd259b1e2262888693a4636e18879f613a62a9", size = 221780, upload-time = "2026-02-09T12:56:50.479Z" }, + { url = "https://files.pythonhosted.org/packages/61/08/3d9c8613079d2b11c185b865de9a4c1a68850cfda2b357fae365cf609f29/coverage-7.13.4-cp311-cp311-win_amd64.whl", hash = "sha256:2421d591f8ca05b308cf0092807308b2facbefe54af7c02ac22548b88b95c98f", size = 222715, upload-time = "2026-02-09T12:56:51.815Z" }, + { url = "https://files.pythonhosted.org/packages/18/1a/54c3c80b2f056164cc0a6cdcb040733760c7c4be9d780fe655f356f433e4/coverage-7.13.4-cp311-cp311-win_arm64.whl", hash = "sha256:79e73a76b854d9c6088fe5d8b2ebe745f8681c55f7397c3c0a016192d681045f", size = 221385, upload-time = "2026-02-09T12:56:53.194Z" }, + { url = 
"https://files.pythonhosted.org/packages/d1/81/4ce2fdd909c5a0ed1f6dedb88aa57ab79b6d1fbd9b588c1ac7ef45659566/coverage-7.13.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:02231499b08dabbe2b96612993e5fc34217cdae907a51b906ac7fca8027a4459", size = 219449, upload-time = "2026-02-09T12:56:54.889Z" }, + { url = "https://files.pythonhosted.org/packages/5d/96/5238b1efc5922ddbdc9b0db9243152c09777804fb7c02ad1741eb18a11c0/coverage-7.13.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40aa8808140e55dc022b15d8aa7f651b6b3d68b365ea0398f1441e0b04d859c3", size = 219810, upload-time = "2026-02-09T12:56:56.33Z" }, + { url = "https://files.pythonhosted.org/packages/78/72/2f372b726d433c9c35e56377cf1d513b4c16fe51841060d826b95caacec1/coverage-7.13.4-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5b856a8ccf749480024ff3bd7310adaef57bf31fd17e1bfc404b7940b6986634", size = 251308, upload-time = "2026-02-09T12:56:57.858Z" }, + { url = "https://files.pythonhosted.org/packages/5d/a0/2ea570925524ef4e00bb6c82649f5682a77fac5ab910a65c9284de422600/coverage-7.13.4-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:2c048ea43875fbf8b45d476ad79f179809c590ec7b79e2035c662e7afa3192e3", size = 254052, upload-time = "2026-02-09T12:56:59.754Z" }, + { url = "https://files.pythonhosted.org/packages/e8/ac/45dc2e19a1939098d783c846e130b8f862fbb50d09e0af663988f2f21973/coverage-7.13.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b7b38448866e83176e28086674fe7368ab8590e4610fb662b44e345b86d63ffa", size = 255165, upload-time = "2026-02-09T12:57:01.287Z" }, + { url = "https://files.pythonhosted.org/packages/2d/4d/26d236ff35abc3b5e63540d3386e4c3b192168c1d96da5cb2f43c640970f/coverage-7.13.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:de6defc1c9badbf8b9e67ae90fd00519186d6ab64e5cc5f3d21359c2a9b2c1d3", size = 257432, upload-time = "2026-02-09T12:57:02.637Z" }, + { url = "https://files.pythonhosted.org/packages/ec/55/14a966c757d1348b2e19caf699415a2a4c4f7feaa4bbc6326a51f5c7dd1b/coverage-7.13.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:7eda778067ad7ffccd23ecffce537dface96212576a07924cbf0d8799d2ded5a", size = 251716, upload-time = "2026-02-09T12:57:04.056Z" }, + { url = "https://files.pythonhosted.org/packages/77/33/50116647905837c66d28b2af1321b845d5f5d19be9655cb84d4a0ea806b4/coverage-7.13.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e87f6c587c3f34356c3759f0420693e35e7eb0e2e41e4c011cb6ec6ecbbf1db7", size = 253089, upload-time = "2026-02-09T12:57:05.503Z" }, + { url = "https://files.pythonhosted.org/packages/c2/b4/8efb11a46e3665d92635a56e4f2d4529de6d33f2cb38afd47d779d15fc99/coverage-7.13.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:8248977c2e33aecb2ced42fef99f2d319e9904a36e55a8a68b69207fb7e43edc", size = 251232, upload-time = "2026-02-09T12:57:06.879Z" }, + { url = "https://files.pythonhosted.org/packages/51/24/8cd73dd399b812cc76bb0ac260e671c4163093441847ffe058ac9fda1e32/coverage-7.13.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:25381386e80ae727608e662474db537d4df1ecd42379b5ba33c84633a2b36d47", size = 255299, upload-time = "2026-02-09T12:57:08.245Z" }, + { url = "https://files.pythonhosted.org/packages/03/94/0a4b12f1d0e029ce1ccc1c800944a9984cbe7d678e470bb6d3c6bc38a0da/coverage-7.13.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = 
"sha256:ee756f00726693e5ba94d6df2bdfd64d4852d23b09bb0bc700e3b30e6f333985", size = 250796, upload-time = "2026-02-09T12:57:10.142Z" }, + { url = "https://files.pythonhosted.org/packages/73/44/6002fbf88f6698ca034360ce474c406be6d5a985b3fdb3401128031eef6b/coverage-7.13.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fdfc1e28e7c7cdce44985b3043bc13bbd9c747520f94a4d7164af8260b3d91f0", size = 252673, upload-time = "2026-02-09T12:57:12.197Z" }, + { url = "https://files.pythonhosted.org/packages/de/c6/a0279f7c00e786be75a749a5674e6fa267bcbd8209cd10c9a450c655dfa7/coverage-7.13.4-cp312-cp312-win32.whl", hash = "sha256:01d4cbc3c283a17fc1e42d614a119f7f438eabb593391283adca8dc86eff1246", size = 221990, upload-time = "2026-02-09T12:57:14.085Z" }, + { url = "https://files.pythonhosted.org/packages/77/4e/c0a25a425fcf5557d9abd18419c95b63922e897bc86c1f327f155ef234a9/coverage-7.13.4-cp312-cp312-win_amd64.whl", hash = "sha256:9401ebc7ef522f01d01d45532c68c5ac40fb27113019b6b7d8b208f6e9baa126", size = 222800, upload-time = "2026-02-09T12:57:15.944Z" }, + { url = "https://files.pythonhosted.org/packages/47/ac/92da44ad9a6f4e3a7debd178949d6f3769bedca33830ce9b1dcdab589a37/coverage-7.13.4-cp312-cp312-win_arm64.whl", hash = "sha256:b1ec7b6b6e93255f952e27ab58fbc68dcc468844b16ecbee881aeb29b6ab4d8d", size = 221415, upload-time = "2026-02-09T12:57:17.497Z" }, + { url = "https://files.pythonhosted.org/packages/db/23/aad45061a31677d68e47499197a131eea55da4875d16c1f42021ab963503/coverage-7.13.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b66a2da594b6068b48b2692f043f35d4d3693fb639d5ea8b39533c2ad9ac3ab9", size = 219474, upload-time = "2026-02-09T12:57:19.332Z" }, + { url = "https://files.pythonhosted.org/packages/a5/70/9b8b67a0945f3dfec1fd896c5cefb7c19d5a3a6d74630b99a895170999ae/coverage-7.13.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3599eb3992d814d23b35c536c28df1a882caa950f8f507cef23d1cbf334995ac", size = 219844, upload-time = "2026-02-09T12:57:20.66Z" }, + { url = "https://files.pythonhosted.org/packages/97/fd/7e859f8fab324cef6c4ad7cff156ca7c489fef9179d5749b0c8d321281c2/coverage-7.13.4-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:93550784d9281e374fb5a12bf1324cc8a963fd63b2d2f223503ef0fd4aa339ea", size = 250832, upload-time = "2026-02-09T12:57:22.007Z" }, + { url = "https://files.pythonhosted.org/packages/e4/dc/b2442d10020c2f52617828862d8b6ee337859cd8f3a1f13d607dddda9cf7/coverage-7.13.4-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:b720ce6a88a2755f7c697c23268ddc47a571b88052e6b155224347389fdf6a3b", size = 253434, upload-time = "2026-02-09T12:57:23.339Z" }, + { url = "https://files.pythonhosted.org/packages/5a/88/6728a7ad17428b18d836540630487231f5470fb82454871149502f5e5aa2/coverage-7.13.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7b322db1284a2ed3aa28ffd8ebe3db91c929b7a333c0820abec3d838ef5b3525", size = 254676, upload-time = "2026-02-09T12:57:24.774Z" }, + { url = "https://files.pythonhosted.org/packages/7c/bc/21244b1b8cedf0dff0a2b53b208015fe798d5f2a8d5348dbfece04224fff/coverage-7.13.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f4594c67d8a7c89cf922d9df0438c7c7bb022ad506eddb0fdb2863359ff78242", size = 256807, upload-time = "2026-02-09T12:57:26.125Z" }, + { url = 
"https://files.pythonhosted.org/packages/97/a0/ddba7ed3251cff51006737a727d84e05b61517d1784a9988a846ba508877/coverage-7.13.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:53d133df809c743eb8bce33b24bcababb371f4441340578cd406e084d94a6148", size = 251058, upload-time = "2026-02-09T12:57:27.614Z" }, + { url = "https://files.pythonhosted.org/packages/9b/55/e289addf7ff54d3a540526f33751951bf0878f3809b47f6dfb3def69c6f7/coverage-7.13.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:76451d1978b95ba6507a039090ba076105c87cc76fc3efd5d35d72093964d49a", size = 252805, upload-time = "2026-02-09T12:57:29.066Z" }, + { url = "https://files.pythonhosted.org/packages/13/4e/cc276b1fa4a59be56d96f1dabddbdc30f4ba22e3b1cd42504c37b3313255/coverage-7.13.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7f57b33491e281e962021de110b451ab8a24182589be17e12a22c79047935e23", size = 250766, upload-time = "2026-02-09T12:57:30.522Z" }, + { url = "https://files.pythonhosted.org/packages/94/44/1093b8f93018f8b41a8cf29636c9292502f05e4a113d4d107d14a3acd044/coverage-7.13.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:1731dc33dc276dafc410a885cbf5992f1ff171393e48a21453b78727d090de80", size = 254923, upload-time = "2026-02-09T12:57:31.946Z" }, + { url = "https://files.pythonhosted.org/packages/8b/55/ea2796da2d42257f37dbea1aab239ba9263b31bd91d5527cdd6db5efe174/coverage-7.13.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:bd60d4fe2f6fa7dff9223ca1bbc9f05d2b6697bc5961072e5d3b952d46e1b1ea", size = 250591, upload-time = "2026-02-09T12:57:33.842Z" }, + { url = "https://files.pythonhosted.org/packages/d4/fa/7c4bb72aacf8af5020675aa633e59c1fbe296d22aed191b6a5b711eb2bc7/coverage-7.13.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9181a3ccead280b828fae232df12b16652702b49d41e99d657f46cc7b1f6ec7a", size = 252364, upload-time = "2026-02-09T12:57:35.743Z" }, + { url = "https://files.pythonhosted.org/packages/5c/38/a8d2ec0146479c20bbaa7181b5b455a0c41101eed57f10dd19a78ab44c80/coverage-7.13.4-cp313-cp313-win32.whl", hash = "sha256:f53d492307962561ac7de4cd1de3e363589b000ab69617c6156a16ba7237998d", size = 222010, upload-time = "2026-02-09T12:57:37.25Z" }, + { url = "https://files.pythonhosted.org/packages/e2/0c/dbfafbe90a185943dcfbc766fe0e1909f658811492d79b741523a414a6cc/coverage-7.13.4-cp313-cp313-win_amd64.whl", hash = "sha256:e6f70dec1cc557e52df5306d051ef56003f74d56e9c4dd7ddb07e07ef32a84dd", size = 222818, upload-time = "2026-02-09T12:57:38.734Z" }, + { url = "https://files.pythonhosted.org/packages/04/d1/934918a138c932c90d78301f45f677fb05c39a3112b96fd2c8e60503cdc7/coverage-7.13.4-cp313-cp313-win_arm64.whl", hash = "sha256:fb07dc5da7e849e2ad31a5d74e9bece81f30ecf5a42909d0a695f8bd1874d6af", size = 221438, upload-time = "2026-02-09T12:57:40.223Z" }, + { url = "https://files.pythonhosted.org/packages/52/57/ee93ced533bcb3e6df961c0c6e42da2fc6addae53fb95b94a89b1e33ebd7/coverage-7.13.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:40d74da8e6c4b9ac18b15331c4b5ebc35a17069410cad462ad4f40dcd2d50c0d", size = 220165, upload-time = "2026-02-09T12:57:41.639Z" }, + { url = "https://files.pythonhosted.org/packages/c5/e0/969fc285a6fbdda49d91af278488d904dcd7651b2693872f0ff94e40e84a/coverage-7.13.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4223b4230a376138939a9173f1bdd6521994f2aff8047fae100d6d94d50c5a12", size = 220516, upload-time = "2026-02-09T12:57:44.215Z" }, + { url = 
"https://files.pythonhosted.org/packages/b1/b8/9531944e16267e2735a30a9641ff49671f07e8138ecf1ca13db9fd2560c7/coverage-7.13.4-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:1d4be36a5114c499f9f1f9195e95ebf979460dbe2d88e6816ea202010ba1c34b", size = 261804, upload-time = "2026-02-09T12:57:45.989Z" }, + { url = "https://files.pythonhosted.org/packages/8a/f3/e63df6d500314a2a60390d1989240d5f27318a7a68fa30ad3806e2a9323e/coverage-7.13.4-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:200dea7d1e8095cc6e98cdabe3fd1d21ab17d3cee6dab00cadbb2fe35d9c15b9", size = 263885, upload-time = "2026-02-09T12:57:47.42Z" }, + { url = "https://files.pythonhosted.org/packages/f3/67/7654810de580e14b37670b60a09c599fa348e48312db5b216d730857ffe6/coverage-7.13.4-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b8eb931ee8e6d8243e253e5ed7336deea6904369d2fd8ae6e43f68abbf167092", size = 266308, upload-time = "2026-02-09T12:57:49.345Z" }, + { url = "https://files.pythonhosted.org/packages/37/6f/39d41eca0eab3cc82115953ad41c4e77935286c930e8fad15eaed1389d83/coverage-7.13.4-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:75eab1ebe4f2f64d9509b984f9314d4aa788540368218b858dad56dc8f3e5eb9", size = 267452, upload-time = "2026-02-09T12:57:50.811Z" }, + { url = "https://files.pythonhosted.org/packages/50/6d/39c0fbb8fc5cd4d2090811e553c2108cf5112e882f82505ee7495349a6bf/coverage-7.13.4-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c35eb28c1d085eb7d8c9b3296567a1bebe03ce72962e932431b9a61f28facf26", size = 261057, upload-time = "2026-02-09T12:57:52.447Z" }, + { url = "https://files.pythonhosted.org/packages/a4/a2/60010c669df5fa603bb5a97fb75407e191a846510da70ac657eb696b7fce/coverage-7.13.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:eb88b316ec33760714a4720feb2816a3a59180fd58c1985012054fa7aebee4c2", size = 263875, upload-time = "2026-02-09T12:57:53.938Z" }, + { url = "https://files.pythonhosted.org/packages/3e/d9/63b22a6bdbd17f1f96e9ed58604c2a6b0e72a9133e37d663bef185877cf6/coverage-7.13.4-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7d41eead3cc673cbd38a4417deb7fd0b4ca26954ff7dc6078e33f6ff97bed940", size = 261500, upload-time = "2026-02-09T12:57:56.012Z" }, + { url = "https://files.pythonhosted.org/packages/70/bf/69f86ba1ad85bc3ad240e4c0e57a2e620fbc0e1645a47b5c62f0e941ad7f/coverage-7.13.4-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:fb26a934946a6afe0e326aebe0730cdff393a8bc0bbb65a2f41e30feddca399c", size = 265212, upload-time = "2026-02-09T12:57:57.5Z" }, + { url = "https://files.pythonhosted.org/packages/ae/f2/5f65a278a8c2148731831574c73e42f57204243d33bedaaf18fa79c5958f/coverage-7.13.4-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:dae88bc0fc77edaa65c14be099bd57ee140cf507e6bfdeea7938457ab387efb0", size = 260398, upload-time = "2026-02-09T12:57:59.027Z" }, + { url = "https://files.pythonhosted.org/packages/ef/80/6e8280a350ee9fea92f14b8357448a242dcaa243cb2c72ab0ca591f66c8c/coverage-7.13.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:845f352911777a8e722bfce168958214951e07e47e5d5d9744109fa5fe77f79b", size = 262584, upload-time = "2026-02-09T12:58:01.129Z" }, + { url = "https://files.pythonhosted.org/packages/22/63/01ff182fc95f260b539590fb12c11ad3e21332c15f9799cb5e2386f71d9f/coverage-7.13.4-cp313-cp313t-win32.whl", hash = 
"sha256:2fa8d5f8de70688a28240de9e139fa16b153cc3cbb01c5f16d88d6505ebdadf9", size = 222688, upload-time = "2026-02-09T12:58:02.736Z" }, + { url = "https://files.pythonhosted.org/packages/a9/43/89de4ef5d3cd53b886afa114065f7e9d3707bdb3e5efae13535b46ae483d/coverage-7.13.4-cp313-cp313t-win_amd64.whl", hash = "sha256:9351229c8c8407645840edcc277f4a2d44814d1bc34a2128c11c2a031d45a5dd", size = 223746, upload-time = "2026-02-09T12:58:05.362Z" }, + { url = "https://files.pythonhosted.org/packages/35/39/7cf0aa9a10d470a5309b38b289b9bb07ddeac5d61af9b664fe9775a4cb3e/coverage-7.13.4-cp313-cp313t-win_arm64.whl", hash = "sha256:30b8d0512f2dc8c8747557e8fb459d6176a2c9e5731e2b74d311c03b78451997", size = 222003, upload-time = "2026-02-09T12:58:06.952Z" }, + { url = "https://files.pythonhosted.org/packages/92/11/a9cf762bb83386467737d32187756a42094927150c3e107df4cb078e8590/coverage-7.13.4-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:300deaee342f90696ed186e3a00c71b5b3d27bffe9e827677954f4ee56969601", size = 219522, upload-time = "2026-02-09T12:58:08.623Z" }, + { url = "https://files.pythonhosted.org/packages/d3/28/56e6d892b7b052236d67c95f1936b6a7cf7c3e2634bf27610b8cbd7f9c60/coverage-7.13.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:29e3220258d682b6226a9b0925bc563ed9a1ebcff3cad30f043eceea7eaf2689", size = 219855, upload-time = "2026-02-09T12:58:10.176Z" }, + { url = "https://files.pythonhosted.org/packages/e5/69/233459ee9eb0c0d10fcc2fe425a029b3fa5ce0f040c966ebce851d030c70/coverage-7.13.4-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:391ee8f19bef69210978363ca930f7328081c6a0152f1166c91f0b5fdd2a773c", size = 250887, upload-time = "2026-02-09T12:58:12.503Z" }, + { url = "https://files.pythonhosted.org/packages/06/90/2cdab0974b9b5bbc1623f7876b73603aecac11b8d95b85b5b86b32de5eab/coverage-7.13.4-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:0dd7ab8278f0d58a0128ba2fca25824321f05d059c1441800e934ff2efa52129", size = 253396, upload-time = "2026-02-09T12:58:14.615Z" }, + { url = "https://files.pythonhosted.org/packages/ac/15/ea4da0f85bf7d7b27635039e649e99deb8173fe551096ea15017f7053537/coverage-7.13.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:78cdf0d578b15148b009ccf18c686aa4f719d887e76e6b40c38ffb61d264a552", size = 254745, upload-time = "2026-02-09T12:58:16.162Z" }, + { url = "https://files.pythonhosted.org/packages/99/11/bb356e86920c655ca4d61daee4e2bbc7258f0a37de0be32d233b561134ff/coverage-7.13.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:48685fee12c2eb3b27c62f2658e7ea21e9c3239cba5a8a242801a0a3f6a8c62a", size = 257055, upload-time = "2026-02-09T12:58:17.892Z" }, + { url = "https://files.pythonhosted.org/packages/c9/0f/9ae1f8cb17029e09da06ca4e28c9e1d5c1c0a511c7074592e37e0836c915/coverage-7.13.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:4e83efc079eb39480e6346a15a1bcb3e9b04759c5202d157e1dd4303cd619356", size = 250911, upload-time = "2026-02-09T12:58:19.495Z" }, + { url = "https://files.pythonhosted.org/packages/89/3a/adfb68558fa815cbc29747b553bc833d2150228f251b127f1ce97e48547c/coverage-7.13.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ecae9737b72408d6a950f7e525f30aca12d4bd8dd95e37342e5beb3a2a8c4f71", size = 252754, upload-time = "2026-02-09T12:58:21.064Z" }, + { url = 
"https://files.pythonhosted.org/packages/32/b1/540d0c27c4e748bd3cd0bd001076ee416eda993c2bae47a73b7cc9357931/coverage-7.13.4-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:ae4578f8528569d3cf303fef2ea569c7f4c4059a38c8667ccef15c6e1f118aa5", size = 250720, upload-time = "2026-02-09T12:58:22.622Z" }, + { url = "https://files.pythonhosted.org/packages/c7/95/383609462b3ffb1fe133014a7c84fc0dd01ed55ac6140fa1093b5af7ebb1/coverage-7.13.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:6fdef321fdfbb30a197efa02d48fcd9981f0d8ad2ae8903ac318adc653f5df98", size = 254994, upload-time = "2026-02-09T12:58:24.548Z" }, + { url = "https://files.pythonhosted.org/packages/f7/ba/1761138e86c81680bfc3c49579d66312865457f9fe405b033184e5793cb3/coverage-7.13.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b0f6ccf3dbe577170bebfce1318707d0e8c3650003cb4b3a9dd744575daa8b5", size = 250531, upload-time = "2026-02-09T12:58:26.271Z" }, + { url = "https://files.pythonhosted.org/packages/f8/8e/05900df797a9c11837ab59c4d6fe94094e029582aab75c3309a93e6fb4e3/coverage-7.13.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:75fcd519f2a5765db3f0e391eb3b7d150cce1a771bf4c9f861aeab86c767a3c0", size = 252189, upload-time = "2026-02-09T12:58:27.807Z" }, + { url = "https://files.pythonhosted.org/packages/00/bd/29c9f2db9ea4ed2738b8a9508c35626eb205d51af4ab7bf56a21a2e49926/coverage-7.13.4-cp314-cp314-win32.whl", hash = "sha256:8e798c266c378da2bd819b0677df41ab46d78065fb2a399558f3f6cae78b2fbb", size = 222258, upload-time = "2026-02-09T12:58:29.441Z" }, + { url = "https://files.pythonhosted.org/packages/a7/4d/1f8e723f6829977410efeb88f73673d794075091c8c7c18848d273dc9d73/coverage-7.13.4-cp314-cp314-win_amd64.whl", hash = "sha256:245e37f664d89861cf2329c9afa2c1fe9e6d4e1a09d872c947e70718aeeac505", size = 223073, upload-time = "2026-02-09T12:58:31.026Z" }, + { url = "https://files.pythonhosted.org/packages/51/5b/84100025be913b44e082ea32abcf1afbf4e872f5120b7a1cab1d331b1e13/coverage-7.13.4-cp314-cp314-win_arm64.whl", hash = "sha256:ad27098a189e5838900ce4c2a99f2fe42a0bf0c2093c17c69b45a71579e8d4a2", size = 221638, upload-time = "2026-02-09T12:58:32.599Z" }, + { url = "https://files.pythonhosted.org/packages/a7/e4/c884a405d6ead1370433dad1e3720216b4f9fd8ef5b64bfd984a2a60a11a/coverage-7.13.4-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:85480adfb35ffc32d40918aad81b89c69c9cc5661a9b8a81476d3e645321a056", size = 220246, upload-time = "2026-02-09T12:58:34.181Z" }, + { url = "https://files.pythonhosted.org/packages/81/5c/4d7ed8b23b233b0fffbc9dfec53c232be2e695468523242ea9fd30f97ad2/coverage-7.13.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:79be69cf7f3bf9b0deeeb062eab7ac7f36cd4cc4c4dd694bd28921ba4d8596cc", size = 220514, upload-time = "2026-02-09T12:58:35.704Z" }, + { url = "https://files.pythonhosted.org/packages/2f/6f/3284d4203fd2f28edd73034968398cd2d4cb04ab192abc8cff007ea35679/coverage-7.13.4-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:caa421e2684e382c5d8973ac55e4f36bed6821a9bad5c953494de960c74595c9", size = 261877, upload-time = "2026-02-09T12:58:37.864Z" }, + { url = "https://files.pythonhosted.org/packages/09/aa/b672a647bbe1556a85337dc95bfd40d146e9965ead9cc2fe81bde1e5cbce/coverage-7.13.4-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:14375934243ee05f56c45393fe2ce81fe5cc503c07cee2bdf1725fb8bef3ffaf", size = 264004, upload-time = "2026-02-09T12:58:39.492Z" }, + { url = 
"https://files.pythonhosted.org/packages/79/a1/aa384dbe9181f98bba87dd23dda436f0c6cf2e148aecbb4e50fc51c1a656/coverage-7.13.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:25a41c3104d08edb094d9db0d905ca54d0cd41c928bb6be3c4c799a54753af55", size = 266408, upload-time = "2026-02-09T12:58:41.852Z" }, + { url = "https://files.pythonhosted.org/packages/53/5e/5150bf17b4019bc600799f376bb9606941e55bd5a775dc1e096b6ffea952/coverage-7.13.4-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6f01afcff62bf9a08fb32b2c1d6e924236c0383c02c790732b6537269e466a72", size = 267544, upload-time = "2026-02-09T12:58:44.093Z" }, + { url = "https://files.pythonhosted.org/packages/e0/ed/f1de5c675987a4a7a672250d2c5c9d73d289dbf13410f00ed7181d8017dd/coverage-7.13.4-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:eb9078108fbf0bcdde37c3f4779303673c2fa1fe8f7956e68d447d0dd426d38a", size = 260980, upload-time = "2026-02-09T12:58:45.721Z" }, + { url = "https://files.pythonhosted.org/packages/b3/e3/fe758d01850aa172419a6743fe76ba8b92c29d181d4f676ffe2dae2ba631/coverage-7.13.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:0e086334e8537ddd17e5f16a344777c1ab8194986ec533711cbe6c41cde841b6", size = 263871, upload-time = "2026-02-09T12:58:47.334Z" }, + { url = "https://files.pythonhosted.org/packages/b6/76/b829869d464115e22499541def9796b25312b8cf235d3bb00b39f1675395/coverage-7.13.4-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:725d985c5ab621268b2edb8e50dfe57633dc69bda071abc470fed55a14935fd3", size = 261472, upload-time = "2026-02-09T12:58:48.995Z" }, + { url = "https://files.pythonhosted.org/packages/14/9e/caedb1679e73e2f6ad240173f55218488bfe043e38da577c4ec977489915/coverage-7.13.4-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:3c06f0f1337c667b971ca2f975523347e63ec5e500b9aa5882d91931cd3ef750", size = 265210, upload-time = "2026-02-09T12:58:51.178Z" }, + { url = "https://files.pythonhosted.org/packages/3a/10/0dd02cb009b16ede425b49ec344aba13a6ae1dc39600840ea6abcb085ac4/coverage-7.13.4-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:590c0ed4bf8e85f745e6b805b2e1c457b2e33d5255dd9729743165253bc9ad39", size = 260319, upload-time = "2026-02-09T12:58:53.081Z" }, + { url = "https://files.pythonhosted.org/packages/92/8e/234d2c927af27c6d7a5ffad5bd2cf31634c46a477b4c7adfbfa66baf7ebb/coverage-7.13.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:eb30bf180de3f632cd043322dad5751390e5385108b2807368997d1a92a509d0", size = 262638, upload-time = "2026-02-09T12:58:55.258Z" }, + { url = "https://files.pythonhosted.org/packages/2f/64/e5547c8ff6964e5965c35a480855911b61509cce544f4d442caa759a0702/coverage-7.13.4-cp314-cp314t-win32.whl", hash = "sha256:c4240e7eded42d131a2d2c4dec70374b781b043ddc79a9de4d55ca71f8e98aea", size = 223040, upload-time = "2026-02-09T12:58:56.936Z" }, + { url = "https://files.pythonhosted.org/packages/c7/96/38086d58a181aac86d503dfa9c47eb20715a79c3e3acbdf786e92e5c09a8/coverage-7.13.4-cp314-cp314t-win_amd64.whl", hash = "sha256:4c7d3cc01e7350f2f0f6f7036caaf5673fb56b6998889ccfe9e1c1fe75a9c932", size = 224148, upload-time = "2026-02-09T12:58:58.645Z" }, + { url = "https://files.pythonhosted.org/packages/ce/72/8d10abd3740a0beb98c305e0c3faf454366221c0f37a8bcf8f60020bb65a/coverage-7.13.4-cp314-cp314t-win_arm64.whl", hash = "sha256:23e3f687cf945070d1c90f85db66d11e3025665d8dafa831301a0e0038f3db9b", size = 222172, upload-time = "2026-02-09T12:59:00.396Z" }, + { url = 
"https://files.pythonhosted.org/packages/0d/4a/331fe2caf6799d591109bb9c08083080f6de90a823695d412a935622abb2/coverage-7.13.4-py3-none-any.whl", hash = "sha256:1af1641e57cf7ba1bd67d677c9abdbcd6cc2ab7da3bca7fa1e2b7e50e65f2ad0", size = 211242, upload-time = "2026-02-09T12:59:02.032Z" }, ] [package.optional-dependencies] @@ -746,62 +769,62 @@ toml = [ [[package]] name = "cryptography" -version = "46.0.4" +version = "46.0.5" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, { name = "typing-extensions", marker = "python_full_version < '3.11'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/78/19/f748958276519adf6a0c1e79e7b8860b4830dda55ccdf29f2719b5fc499c/cryptography-46.0.4.tar.gz", hash = "sha256:bfd019f60f8abc2ed1b9be4ddc21cfef059c841d86d710bb69909a688cbb8f59", size = 749301, upload-time = "2026-01-28T00:24:37.379Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/8d/99/157aae7949a5f30d51fcb1a9851e8ebd5c74bf99b5285d8bb4b8b9ee641e/cryptography-46.0.4-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:281526e865ed4166009e235afadf3a4c4cba6056f99336a99efba65336fd5485", size = 7173686, upload-time = "2026-01-28T00:23:07.515Z" }, - { url = "https://files.pythonhosted.org/packages/87/91/874b8910903159043b5c6a123b7e79c4559ddd1896e38967567942635778/cryptography-46.0.4-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5f14fba5bf6f4390d7ff8f086c566454bff0411f6d8aa7af79c88b6f9267aecc", size = 4275871, upload-time = "2026-01-28T00:23:09.439Z" }, - { url = "https://files.pythonhosted.org/packages/c0/35/690e809be77896111f5b195ede56e4b4ed0435b428c2f2b6d35046fbb5e8/cryptography-46.0.4-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:47bcd19517e6389132f76e2d5303ded6cf3f78903da2158a671be8de024f4cd0", size = 4423124, upload-time = "2026-01-28T00:23:11.529Z" }, - { url = "https://files.pythonhosted.org/packages/1a/5b/a26407d4f79d61ca4bebaa9213feafdd8806dc69d3d290ce24996d3cfe43/cryptography-46.0.4-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:01df4f50f314fbe7009f54046e908d1754f19d0c6d3070df1e6268c5a4af09fa", size = 4277090, upload-time = "2026-01-28T00:23:13.123Z" }, - { url = "https://files.pythonhosted.org/packages/0c/d8/4bb7aec442a9049827aa34cee1aa83803e528fa55da9a9d45d01d1bb933e/cryptography-46.0.4-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5aa3e463596b0087b3da0dbe2b2487e9fc261d25da85754e30e3b40637d61f81", size = 4947652, upload-time = "2026-01-28T00:23:14.554Z" }, - { url = "https://files.pythonhosted.org/packages/2b/08/f83e2e0814248b844265802d081f2fac2f1cbe6cd258e72ba14ff006823a/cryptography-46.0.4-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:0a9ad24359fee86f131836a9ac3bffc9329e956624a2d379b613f8f8abaf5255", size = 4455157, upload-time = "2026-01-28T00:23:16.443Z" }, - { url = "https://files.pythonhosted.org/packages/0a/05/19d849cf4096448779d2dcc9bb27d097457dac36f7273ffa875a93b5884c/cryptography-46.0.4-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:dc1272e25ef673efe72f2096e92ae39dea1a1a450dd44918b15351f72c5a168e", size = 3981078, upload-time = "2026-01-28T00:23:17.838Z" }, - { url = "https://files.pythonhosted.org/packages/e6/89/f7bac81d66ba7cde867a743ea5b37537b32b5c633c473002b26a226f703f/cryptography-46.0.4-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:de0f5f4ec8711ebc555f54735d4c673fc34b65c44283895f1a08c2b49d2fd99c", size = 4276213, upload-time = "2026-01-28T00:23:19.257Z" }, - { url = 
"https://files.pythonhosted.org/packages/da/9f/7133e41f24edd827020ad21b068736e792bc68eecf66d93c924ad4719fb3/cryptography-46.0.4-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:eeeb2e33d8dbcccc34d64651f00a98cb41b2dc69cef866771a5717e6734dfa32", size = 4912190, upload-time = "2026-01-28T00:23:21.244Z" }, - { url = "https://files.pythonhosted.org/packages/a6/f7/6d43cbaddf6f65b24816e4af187d211f0bc536a29961f69faedc48501d8e/cryptography-46.0.4-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:3d425eacbc9aceafd2cb429e42f4e5d5633c6f873f5e567077043ef1b9bbf616", size = 4454641, upload-time = "2026-01-28T00:23:22.866Z" }, - { url = "https://files.pythonhosted.org/packages/9e/4f/ebd0473ad656a0ac912a16bd07db0f5d85184924e14fc88feecae2492834/cryptography-46.0.4-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:91627ebf691d1ea3976a031b61fb7bac1ccd745afa03602275dda443e11c8de0", size = 4405159, upload-time = "2026-01-28T00:23:25.278Z" }, - { url = "https://files.pythonhosted.org/packages/d1/f7/7923886f32dc47e27adeff8246e976d77258fd2aa3efdd1754e4e323bf49/cryptography-46.0.4-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2d08bc22efd73e8854b0b7caff402d735b354862f1145d7be3b9c0f740fef6a0", size = 4666059, upload-time = "2026-01-28T00:23:26.766Z" }, - { url = "https://files.pythonhosted.org/packages/eb/a7/0fca0fd3591dffc297278a61813d7f661a14243dd60f499a7a5b48acb52a/cryptography-46.0.4-cp311-abi3-win32.whl", hash = "sha256:82a62483daf20b8134f6e92898da70d04d0ef9a75829d732ea1018678185f4f5", size = 3026378, upload-time = "2026-01-28T00:23:28.317Z" }, - { url = "https://files.pythonhosted.org/packages/2d/12/652c84b6f9873f0909374864a57b003686c642ea48c84d6c7e2c515e6da5/cryptography-46.0.4-cp311-abi3-win_amd64.whl", hash = "sha256:6225d3ebe26a55dbc8ead5ad1265c0403552a63336499564675b29eb3184c09b", size = 3478614, upload-time = "2026-01-28T00:23:30.275Z" }, - { url = "https://files.pythonhosted.org/packages/b9/27/542b029f293a5cce59349d799d4d8484b3b1654a7b9a0585c266e974a488/cryptography-46.0.4-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:485e2b65d25ec0d901bca7bcae0f53b00133bf3173916d8e421f6fddde103908", size = 7116417, upload-time = "2026-01-28T00:23:31.958Z" }, - { url = "https://files.pythonhosted.org/packages/f8/f5/559c25b77f40b6bf828eabaf988efb8b0e17b573545edb503368ca0a2a03/cryptography-46.0.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:078e5f06bd2fa5aea5a324f2a09f914b1484f1d0c2a4d6a8a28c74e72f65f2da", size = 4264508, upload-time = "2026-01-28T00:23:34.264Z" }, - { url = "https://files.pythonhosted.org/packages/49/a1/551fa162d33074b660dc35c9bc3616fefa21a0e8c1edd27b92559902e408/cryptography-46.0.4-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dce1e4f068f03008da7fa51cc7abc6ddc5e5de3e3d1550334eaf8393982a5829", size = 4409080, upload-time = "2026-01-28T00:23:35.793Z" }, - { url = "https://files.pythonhosted.org/packages/b0/6a/4d8d129a755f5d6df1bbee69ea2f35ebfa954fa1847690d1db2e8bca46a5/cryptography-46.0.4-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:2067461c80271f422ee7bdbe79b9b4be54a5162e90345f86a23445a0cf3fd8a2", size = 4270039, upload-time = "2026-01-28T00:23:37.263Z" }, - { url = "https://files.pythonhosted.org/packages/4c/f5/ed3fcddd0a5e39321e595e144615399e47e7c153a1fb8c4862aec3151ff9/cryptography-46.0.4-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:c92010b58a51196a5f41c3795190203ac52edfd5dc3ff99149b4659eba9d2085", size = 4926748, upload-time = "2026-01-28T00:23:38.884Z" }, - { url = 
"https://files.pythonhosted.org/packages/43/ae/9f03d5f0c0c00e85ecb34f06d3b79599f20630e4db91b8a6e56e8f83d410/cryptography-46.0.4-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:829c2b12bbc5428ab02d6b7f7e9bbfd53e33efd6672d21341f2177470171ad8b", size = 4442307, upload-time = "2026-01-28T00:23:40.56Z" }, - { url = "https://files.pythonhosted.org/packages/8b/22/e0f9f2dae8040695103369cf2283ef9ac8abe4d51f68710bec2afd232609/cryptography-46.0.4-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:62217ba44bf81b30abaeda1488686a04a702a261e26f87db51ff61d9d3510abd", size = 3959253, upload-time = "2026-01-28T00:23:42.827Z" }, - { url = "https://files.pythonhosted.org/packages/01/5b/6a43fcccc51dae4d101ac7d378a8724d1ba3de628a24e11bf2f4f43cba4d/cryptography-46.0.4-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:9c2da296c8d3415b93e6053f5a728649a87a48ce084a9aaf51d6e46c87c7f2d2", size = 4269372, upload-time = "2026-01-28T00:23:44.655Z" }, - { url = "https://files.pythonhosted.org/packages/17/b7/0f6b8c1dd0779df2b526e78978ff00462355e31c0a6f6cff8a3e99889c90/cryptography-46.0.4-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:9b34d8ba84454641a6bf4d6762d15847ecbd85c1316c0a7984e6e4e9f748ec2e", size = 4891908, upload-time = "2026-01-28T00:23:46.48Z" }, - { url = "https://files.pythonhosted.org/packages/83/17/259409b8349aa10535358807a472c6a695cf84f106022268d31cea2b6c97/cryptography-46.0.4-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:df4a817fa7138dd0c96c8c8c20f04b8aaa1fac3bbf610913dcad8ea82e1bfd3f", size = 4441254, upload-time = "2026-01-28T00:23:48.403Z" }, - { url = "https://files.pythonhosted.org/packages/9c/fe/e4a1b0c989b00cee5ffa0764401767e2d1cf59f45530963b894129fd5dce/cryptography-46.0.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:b1de0ebf7587f28f9190b9cb526e901bf448c9e6a99655d2b07fff60e8212a82", size = 4396520, upload-time = "2026-01-28T00:23:50.26Z" }, - { url = "https://files.pythonhosted.org/packages/b3/81/ba8fd9657d27076eb40d6a2f941b23429a3c3d2f56f5a921d6b936a27bc9/cryptography-46.0.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:9b4d17bc7bd7cdd98e3af40b441feaea4c68225e2eb2341026c84511ad246c0c", size = 4651479, upload-time = "2026-01-28T00:23:51.674Z" }, - { url = "https://files.pythonhosted.org/packages/00/03/0de4ed43c71c31e4fe954edd50b9d28d658fef56555eba7641696370a8e2/cryptography-46.0.4-cp314-cp314t-win32.whl", hash = "sha256:c411f16275b0dea722d76544a61d6421e2cc829ad76eec79280dbdc9ddf50061", size = 3001986, upload-time = "2026-01-28T00:23:53.485Z" }, - { url = "https://files.pythonhosted.org/packages/5c/70/81830b59df7682917d7a10f833c4dab2a5574cd664e86d18139f2b421329/cryptography-46.0.4-cp314-cp314t-win_amd64.whl", hash = "sha256:728fedc529efc1439eb6107b677f7f7558adab4553ef8669f0d02d42d7b959a7", size = 3468288, upload-time = "2026-01-28T00:23:55.09Z" }, - { url = "https://files.pythonhosted.org/packages/56/f7/f648fdbb61d0d45902d3f374217451385edc7e7768d1b03ff1d0e5ffc17b/cryptography-46.0.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a9556ba711f7c23f77b151d5798f3ac44a13455cc68db7697a1096e6d0563cab", size = 7169583, upload-time = "2026-01-28T00:23:56.558Z" }, - { url = "https://files.pythonhosted.org/packages/d8/cc/8f3224cbb2a928de7298d6ed4790f5ebc48114e02bdc9559196bfb12435d/cryptography-46.0.4-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8bf75b0259e87fa70bddc0b8b4078b76e7fd512fd9afae6c1193bcf440a4dbef", size = 4275419, upload-time = "2026-01-28T00:23:58.364Z" }, - { url = 
"https://files.pythonhosted.org/packages/17/43/4a18faa7a872d00e4264855134ba82d23546c850a70ff209e04ee200e76f/cryptography-46.0.4-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3c268a3490df22270955966ba236d6bc4a8f9b6e4ffddb78aac535f1a5ea471d", size = 4419058, upload-time = "2026-01-28T00:23:59.867Z" }, - { url = "https://files.pythonhosted.org/packages/ee/64/6651969409821d791ba12346a124f55e1b76f66a819254ae840a965d4b9c/cryptography-46.0.4-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:812815182f6a0c1d49a37893a303b44eaac827d7f0d582cecfc81b6427f22973", size = 4278151, upload-time = "2026-01-28T00:24:01.731Z" }, - { url = "https://files.pythonhosted.org/packages/20/0b/a7fce65ee08c3c02f7a8310cc090a732344066b990ac63a9dfd0a655d321/cryptography-46.0.4-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:a90e43e3ef65e6dcf969dfe3bb40cbf5aef0d523dff95bfa24256be172a845f4", size = 4939441, upload-time = "2026-01-28T00:24:03.175Z" }, - { url = "https://files.pythonhosted.org/packages/db/a7/20c5701e2cd3e1dfd7a19d2290c522a5f435dd30957d431dcb531d0f1413/cryptography-46.0.4-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a05177ff6296644ef2876fce50518dffb5bcdf903c85250974fc8bc85d54c0af", size = 4451617, upload-time = "2026-01-28T00:24:05.403Z" }, - { url = "https://files.pythonhosted.org/packages/00/dc/3e16030ea9aa47b63af6524c354933b4fb0e352257c792c4deeb0edae367/cryptography-46.0.4-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:daa392191f626d50f1b136c9b4cf08af69ca8279d110ea24f5c2700054d2e263", size = 3977774, upload-time = "2026-01-28T00:24:06.851Z" }, - { url = "https://files.pythonhosted.org/packages/42/c8/ad93f14118252717b465880368721c963975ac4b941b7ef88f3c56bf2897/cryptography-46.0.4-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e07ea39c5b048e085f15923511d8121e4a9dc45cee4e3b970ca4f0d338f23095", size = 4277008, upload-time = "2026-01-28T00:24:08.926Z" }, - { url = "https://files.pythonhosted.org/packages/00/cf/89c99698151c00a4631fbfcfcf459d308213ac29e321b0ff44ceeeac82f1/cryptography-46.0.4-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:d5a45ddc256f492ce42a4e35879c5e5528c09cd9ad12420828c972951d8e016b", size = 4903339, upload-time = "2026-01-28T00:24:12.009Z" }, - { url = "https://files.pythonhosted.org/packages/03/c3/c90a2cb358de4ac9309b26acf49b2a100957e1ff5cc1e98e6c4996576710/cryptography-46.0.4-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:6bb5157bf6a350e5b28aee23beb2d84ae6f5be390b2f8ee7ea179cda077e1019", size = 4451216, upload-time = "2026-01-28T00:24:13.975Z" }, - { url = "https://files.pythonhosted.org/packages/96/2c/8d7f4171388a10208671e181ca43cdc0e596d8259ebacbbcfbd16de593da/cryptography-46.0.4-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:dd5aba870a2c40f87a3af043e0dee7d9eb02d4aff88a797b48f2b43eff8c3ab4", size = 4404299, upload-time = "2026-01-28T00:24:16.169Z" }, - { url = "https://files.pythonhosted.org/packages/e9/23/cbb2036e450980f65c6e0a173b73a56ff3bccd8998965dea5cc9ddd424a5/cryptography-46.0.4-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:93d8291da8d71024379ab2cb0b5c57915300155ad42e07f76bea6ad838d7e59b", size = 4664837, upload-time = "2026-01-28T00:24:17.629Z" }, - { url = "https://files.pythonhosted.org/packages/0a/21/f7433d18fe6d5845329cbdc597e30caf983229c7a245bcf54afecc555938/cryptography-46.0.4-cp38-abi3-win32.whl", hash = "sha256:0563655cb3c6d05fb2afe693340bc050c30f9f34e15763361cf08e94749401fc", size = 3009779, upload-time = "2026-01-28T00:24:20.198Z" }, - { url = 
"https://files.pythonhosted.org/packages/3a/6a/bd2e7caa2facffedf172a45c1a02e551e6d7d4828658c9a245516a598d94/cryptography-46.0.4-cp38-abi3-win_amd64.whl", hash = "sha256:fa0900b9ef9c49728887d1576fd8d9e7e3ea872fa9b25ef9b64888adc434e976", size = 3466633, upload-time = "2026-01-28T00:24:21.851Z" }, - { url = "https://files.pythonhosted.org/packages/59/e0/f9c6c53e1f2a1c2507f00f2faba00f01d2f334b35b0fbfe5286715da2184/cryptography-46.0.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:766330cce7416c92b5e90c3bb71b1b79521760cdcfc3a6a1a182d4c9fab23d2b", size = 3476316, upload-time = "2026-01-28T00:24:24.144Z" }, - { url = "https://files.pythonhosted.org/packages/27/7a/f8d2d13227a9a1a9fe9c7442b057efecffa41f1e3c51d8622f26b9edbe8f/cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c236a44acfb610e70f6b3e1c3ca20ff24459659231ef2f8c48e879e2d32b73da", size = 4216693, upload-time = "2026-01-28T00:24:25.758Z" }, - { url = "https://files.pythonhosted.org/packages/c5/de/3787054e8f7972658370198753835d9d680f6cd4a39df9f877b57f0dd69c/cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8a15fb869670efa8f83cbffbc8753c1abf236883225aed74cd179b720ac9ec80", size = 4382765, upload-time = "2026-01-28T00:24:27.577Z" }, - { url = "https://files.pythonhosted.org/packages/8a/5f/60e0afb019973ba6a0b322e86b3d61edf487a4f5597618a430a2a15f2d22/cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:fdc3daab53b212472f1524d070735b2f0c214239df131903bae1d598016fa822", size = 4216066, upload-time = "2026-01-28T00:24:29.056Z" }, - { url = "https://files.pythonhosted.org/packages/81/8e/bf4a0de294f147fee66f879d9bae6f8e8d61515558e3d12785dd90eca0be/cryptography-46.0.4-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:44cc0675b27cadb71bdbb96099cca1fa051cd11d2ade09e5cd3a2edb929ed947", size = 4382025, upload-time = "2026-01-28T00:24:30.681Z" }, - { url = "https://files.pythonhosted.org/packages/79/f4/9ceb90cfd6a3847069b0b0b353fd3075dc69b49defc70182d8af0c4ca390/cryptography-46.0.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:be8c01a7d5a55f9a47d1888162b76c8f49d62b234d88f0ff91a9fbebe32ffbc3", size = 3406043, upload-time = "2026-01-28T00:24:32.236Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/60/04/ee2a9e8542e4fa2773b81771ff8349ff19cdd56b7258a0cc442639052edb/cryptography-46.0.5.tar.gz", hash = "sha256:abace499247268e3757271b2f1e244b36b06f8515cf27c4d49468fc9eb16e93d", size = 750064, upload-time = "2026-02-10T19:18:38.255Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f7/81/b0bb27f2ba931a65409c6b8a8b358a7f03c0e46eceacddff55f7c84b1f3b/cryptography-46.0.5-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:351695ada9ea9618b3500b490ad54c739860883df6c1f555e088eaf25b1bbaad", size = 7176289, upload-time = "2026-02-10T19:17:08.274Z" }, + { url = "https://files.pythonhosted.org/packages/ff/9e/6b4397a3e3d15123de3b1806ef342522393d50736c13b20ec4c9ea6693a6/cryptography-46.0.5-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c18ff11e86df2e28854939acde2d003f7984f721eba450b56a200ad90eeb0e6b", size = 4275637, upload-time = "2026-02-10T19:17:10.53Z" }, + { url = "https://files.pythonhosted.org/packages/63/e7/471ab61099a3920b0c77852ea3f0ea611c9702f651600397ac567848b897/cryptography-46.0.5-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d7e3d356b8cd4ea5aff04f129d5f66ebdc7b6f8eae802b93739ed520c47c79b", size = 4424742, upload-time = "2026-02-10T19:17:12.388Z" }, + { 
url = "https://files.pythonhosted.org/packages/37/53/a18500f270342d66bf7e4d9f091114e31e5ee9e7375a5aba2e85a91e0044/cryptography-46.0.5-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:50bfb6925eff619c9c023b967d5b77a54e04256c4281b0e21336a130cd7fc263", size = 4277528, upload-time = "2026-02-10T19:17:13.853Z" }, + { url = "https://files.pythonhosted.org/packages/22/29/c2e812ebc38c57b40e7c583895e73c8c5adb4d1e4a0cc4c5a4fdab2b1acc/cryptography-46.0.5-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:803812e111e75d1aa73690d2facc295eaefd4439be1023fefc4995eaea2af90d", size = 4947993, upload-time = "2026-02-10T19:17:15.618Z" }, + { url = "https://files.pythonhosted.org/packages/6b/e7/237155ae19a9023de7e30ec64e5d99a9431a567407ac21170a046d22a5a3/cryptography-46.0.5-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ee190460e2fbe447175cda91b88b84ae8322a104fc27766ad09428754a618ed", size = 4456855, upload-time = "2026-02-10T19:17:17.221Z" }, + { url = "https://files.pythonhosted.org/packages/2d/87/fc628a7ad85b81206738abbd213b07702bcbdada1dd43f72236ef3cffbb5/cryptography-46.0.5-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:f145bba11b878005c496e93e257c1e88f154d278d2638e6450d17e0f31e558d2", size = 3984635, upload-time = "2026-02-10T19:17:18.792Z" }, + { url = "https://files.pythonhosted.org/packages/84/29/65b55622bde135aedf4565dc509d99b560ee4095e56989e815f8fd2aa910/cryptography-46.0.5-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e9251e3be159d1020c4030bd2e5f84d6a43fe54b6c19c12f51cde9542a2817b2", size = 4277038, upload-time = "2026-02-10T19:17:20.256Z" }, + { url = "https://files.pythonhosted.org/packages/bc/36/45e76c68d7311432741faf1fbf7fac8a196a0a735ca21f504c75d37e2558/cryptography-46.0.5-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:47fb8a66058b80e509c47118ef8a75d14c455e81ac369050f20ba0d23e77fee0", size = 4912181, upload-time = "2026-02-10T19:17:21.825Z" }, + { url = "https://files.pythonhosted.org/packages/6d/1a/c1ba8fead184d6e3d5afcf03d569acac5ad063f3ac9fb7258af158f7e378/cryptography-46.0.5-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:4c3341037c136030cb46e4b1e17b7418ea4cbd9dd207e4a6f3b2b24e0d4ac731", size = 4456482, upload-time = "2026-02-10T19:17:25.133Z" }, + { url = "https://files.pythonhosted.org/packages/f9/e5/3fb22e37f66827ced3b902cf895e6a6bc1d095b5b26be26bd13c441fdf19/cryptography-46.0.5-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:890bcb4abd5a2d3f852196437129eb3667d62630333aacc13dfd470fad3aaa82", size = 4405497, upload-time = "2026-02-10T19:17:26.66Z" }, + { url = "https://files.pythonhosted.org/packages/1a/df/9d58bb32b1121a8a2f27383fabae4d63080c7ca60b9b5c88be742be04ee7/cryptography-46.0.5-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:80a8d7bfdf38f87ca30a5391c0c9ce4ed2926918e017c29ddf643d0ed2778ea1", size = 4667819, upload-time = "2026-02-10T19:17:28.569Z" }, + { url = "https://files.pythonhosted.org/packages/ea/ed/325d2a490c5e94038cdb0117da9397ece1f11201f425c4e9c57fe5b9f08b/cryptography-46.0.5-cp311-abi3-win32.whl", hash = "sha256:60ee7e19e95104d4c03871d7d7dfb3d22ef8a9b9c6778c94e1c8fcc8365afd48", size = 3028230, upload-time = "2026-02-10T19:17:30.518Z" }, + { url = "https://files.pythonhosted.org/packages/e9/5a/ac0f49e48063ab4255d9e3b79f5def51697fce1a95ea1370f03dc9db76f6/cryptography-46.0.5-cp311-abi3-win_amd64.whl", hash = "sha256:38946c54b16c885c72c4f59846be9743d699eee2b69b6988e0a00a01f46a61a4", size = 3480909, upload-time = "2026-02-10T19:17:32.083Z" }, + { url = 
"https://files.pythonhosted.org/packages/00/13/3d278bfa7a15a96b9dc22db5a12ad1e48a9eb3d40e1827ef66a5df75d0d0/cryptography-46.0.5-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:94a76daa32eb78d61339aff7952ea819b1734b46f73646a07decb40e5b3448e2", size = 7119287, upload-time = "2026-02-10T19:17:33.801Z" }, + { url = "https://files.pythonhosted.org/packages/67/c8/581a6702e14f0898a0848105cbefd20c058099e2c2d22ef4e476dfec75d7/cryptography-46.0.5-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5be7bf2fb40769e05739dd0046e7b26f9d4670badc7b032d6ce4db64dddc0678", size = 4265728, upload-time = "2026-02-10T19:17:35.569Z" }, + { url = "https://files.pythonhosted.org/packages/dd/4a/ba1a65ce8fc65435e5a849558379896c957870dd64fecea97b1ad5f46a37/cryptography-46.0.5-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fe346b143ff9685e40192a4960938545c699054ba11d4f9029f94751e3f71d87", size = 4408287, upload-time = "2026-02-10T19:17:36.938Z" }, + { url = "https://files.pythonhosted.org/packages/f8/67/8ffdbf7b65ed1ac224d1c2df3943553766914a8ca718747ee3871da6107e/cryptography-46.0.5-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:c69fd885df7d089548a42d5ec05be26050ebcd2283d89b3d30676eb32ff87dee", size = 4270291, upload-time = "2026-02-10T19:17:38.748Z" }, + { url = "https://files.pythonhosted.org/packages/f8/e5/f52377ee93bc2f2bba55a41a886fd208c15276ffbd2569f2ddc89d50e2c5/cryptography-46.0.5-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:8293f3dea7fc929ef7240796ba231413afa7b68ce38fd21da2995549f5961981", size = 4927539, upload-time = "2026-02-10T19:17:40.241Z" }, + { url = "https://files.pythonhosted.org/packages/3b/02/cfe39181b02419bbbbcf3abdd16c1c5c8541f03ca8bda240debc467d5a12/cryptography-46.0.5-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:1abfdb89b41c3be0365328a410baa9df3ff8a9110fb75e7b52e66803ddabc9a9", size = 4442199, upload-time = "2026-02-10T19:17:41.789Z" }, + { url = "https://files.pythonhosted.org/packages/c0/96/2fcaeb4873e536cf71421a388a6c11b5bc846e986b2b069c79363dc1648e/cryptography-46.0.5-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:d66e421495fdb797610a08f43b05269e0a5ea7f5e652a89bfd5a7d3c1dee3648", size = 3960131, upload-time = "2026-02-10T19:17:43.379Z" }, + { url = "https://files.pythonhosted.org/packages/d8/d2/b27631f401ddd644e94c5cf33c9a4069f72011821cf3dc7309546b0642a0/cryptography-46.0.5-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:4e817a8920bfbcff8940ecfd60f23d01836408242b30f1a708d93198393a80b4", size = 4270072, upload-time = "2026-02-10T19:17:45.481Z" }, + { url = "https://files.pythonhosted.org/packages/f4/a7/60d32b0370dae0b4ebe55ffa10e8599a2a59935b5ece1b9f06edb73abdeb/cryptography-46.0.5-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:68f68d13f2e1cb95163fa3b4db4bf9a159a418f5f6e7242564fc75fcae667fd0", size = 4892170, upload-time = "2026-02-10T19:17:46.997Z" }, + { url = "https://files.pythonhosted.org/packages/d2/b9/cf73ddf8ef1164330eb0b199a589103c363afa0cf794218c24d524a58eab/cryptography-46.0.5-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:a3d1fae9863299076f05cb8a778c467578262fae09f9dc0ee9b12eb4268ce663", size = 4441741, upload-time = "2026-02-10T19:17:48.661Z" }, + { url = "https://files.pythonhosted.org/packages/5f/eb/eee00b28c84c726fe8fa0158c65afe312d9c3b78d9d01daf700f1f6e37ff/cryptography-46.0.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:c4143987a42a2397f2fc3b4d7e3a7d313fbe684f67ff443999e803dd75a76826", size = 4396728, upload-time = 
"2026-02-10T19:17:50.058Z" }, + { url = "https://files.pythonhosted.org/packages/65/f4/6bc1a9ed5aef7145045114b75b77c2a8261b4d38717bd8dea111a63c3442/cryptography-46.0.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:7d731d4b107030987fd61a7f8ab512b25b53cef8f233a97379ede116f30eb67d", size = 4652001, upload-time = "2026-02-10T19:17:51.54Z" }, + { url = "https://files.pythonhosted.org/packages/86/ef/5d00ef966ddd71ac2e6951d278884a84a40ffbd88948ef0e294b214ae9e4/cryptography-46.0.5-cp314-cp314t-win32.whl", hash = "sha256:c3bcce8521d785d510b2aad26ae2c966092b7daa8f45dd8f44734a104dc0bc1a", size = 3003637, upload-time = "2026-02-10T19:17:52.997Z" }, + { url = "https://files.pythonhosted.org/packages/b7/57/f3f4160123da6d098db78350fdfd9705057aad21de7388eacb2401dceab9/cryptography-46.0.5-cp314-cp314t-win_amd64.whl", hash = "sha256:4d8ae8659ab18c65ced284993c2265910f6c9e650189d4e3f68445ef82a810e4", size = 3469487, upload-time = "2026-02-10T19:17:54.549Z" }, + { url = "https://files.pythonhosted.org/packages/e2/fa/a66aa722105ad6a458bebd64086ca2b72cdd361fed31763d20390f6f1389/cryptography-46.0.5-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:4108d4c09fbbf2789d0c926eb4152ae1760d5a2d97612b92d508d96c861e4d31", size = 7170514, upload-time = "2026-02-10T19:17:56.267Z" }, + { url = "https://files.pythonhosted.org/packages/0f/04/c85bdeab78c8bc77b701bf0d9bdcf514c044e18a46dcff330df5448631b0/cryptography-46.0.5-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1f30a86d2757199cb2d56e48cce14deddf1f9c95f1ef1b64ee91ea43fe2e18", size = 4275349, upload-time = "2026-02-10T19:17:58.419Z" }, + { url = "https://files.pythonhosted.org/packages/5c/32/9b87132a2f91ee7f5223b091dc963055503e9b442c98fc0b8a5ca765fab0/cryptography-46.0.5-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:039917b0dc418bb9f6edce8a906572d69e74bd330b0b3fea4f79dab7f8ddd235", size = 4420667, upload-time = "2026-02-10T19:18:00.619Z" }, + { url = "https://files.pythonhosted.org/packages/a1/a6/a7cb7010bec4b7c5692ca6f024150371b295ee1c108bdc1c400e4c44562b/cryptography-46.0.5-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ba2a27ff02f48193fc4daeadf8ad2590516fa3d0adeeb34336b96f7fa64c1e3a", size = 4276980, upload-time = "2026-02-10T19:18:02.379Z" }, + { url = "https://files.pythonhosted.org/packages/8e/7c/c4f45e0eeff9b91e3f12dbd0e165fcf2a38847288fcfd889deea99fb7b6d/cryptography-46.0.5-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:61aa400dce22cb001a98014f647dc21cda08f7915ceb95df0c9eaf84b4b6af76", size = 4939143, upload-time = "2026-02-10T19:18:03.964Z" }, + { url = "https://files.pythonhosted.org/packages/37/19/e1b8f964a834eddb44fa1b9a9976f4e414cbb7aa62809b6760c8803d22d1/cryptography-46.0.5-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ce58ba46e1bc2aac4f7d9290223cead56743fa6ab94a5d53292ffaac6a91614", size = 4453674, upload-time = "2026-02-10T19:18:05.588Z" }, + { url = "https://files.pythonhosted.org/packages/db/ed/db15d3956f65264ca204625597c410d420e26530c4e2943e05a0d2f24d51/cryptography-46.0.5-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:420d0e909050490d04359e7fdb5ed7e667ca5c3c402b809ae2563d7e66a92229", size = 3978801, upload-time = "2026-02-10T19:18:07.167Z" }, + { url = "https://files.pythonhosted.org/packages/41/e2/df40a31d82df0a70a0daf69791f91dbb70e47644c58581d654879b382d11/cryptography-46.0.5-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:582f5fcd2afa31622f317f80426a027f30dc792e9c80ffee87b993200ea115f1", size = 4276755, upload-time = "2026-02-10T19:18:09.813Z" }, 
+ { url = "https://files.pythonhosted.org/packages/33/45/726809d1176959f4a896b86907b98ff4391a8aa29c0aaaf9450a8a10630e/cryptography-46.0.5-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:bfd56bb4b37ed4f330b82402f6f435845a5f5648edf1ad497da51a8452d5d62d", size = 4901539, upload-time = "2026-02-10T19:18:11.263Z" }, + { url = "https://files.pythonhosted.org/packages/99/0f/a3076874e9c88ecb2ecc31382f6e7c21b428ede6f55aafa1aa272613e3cd/cryptography-46.0.5-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:a3d507bb6a513ca96ba84443226af944b0f7f47dcc9a399d110cd6146481d24c", size = 4452794, upload-time = "2026-02-10T19:18:12.914Z" }, + { url = "https://files.pythonhosted.org/packages/02/ef/ffeb542d3683d24194a38f66ca17c0a4b8bf10631feef44a7ef64e631b1a/cryptography-46.0.5-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9f16fbdf4da055efb21c22d81b89f155f02ba420558db21288b3d0035bafd5f4", size = 4404160, upload-time = "2026-02-10T19:18:14.375Z" }, + { url = "https://files.pythonhosted.org/packages/96/93/682d2b43c1d5f1406ed048f377c0fc9fc8f7b0447a478d5c65ab3d3a66eb/cryptography-46.0.5-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:ced80795227d70549a411a4ab66e8ce307899fad2220ce5ab2f296e687eacde9", size = 4667123, upload-time = "2026-02-10T19:18:15.886Z" }, + { url = "https://files.pythonhosted.org/packages/45/2d/9c5f2926cb5300a8eefc3f4f0b3f3df39db7f7ce40c8365444c49363cbda/cryptography-46.0.5-cp38-abi3-win32.whl", hash = "sha256:02f547fce831f5096c9a567fd41bc12ca8f11df260959ecc7c3202555cc47a72", size = 3010220, upload-time = "2026-02-10T19:18:17.361Z" }, + { url = "https://files.pythonhosted.org/packages/48/ef/0c2f4a8e31018a986949d34a01115dd057bf536905dca38897bacd21fac3/cryptography-46.0.5-cp38-abi3-win_amd64.whl", hash = "sha256:556e106ee01aa13484ce9b0239bca667be5004efb0aabbed28d353df86445595", size = 3467050, upload-time = "2026-02-10T19:18:18.899Z" }, + { url = "https://files.pythonhosted.org/packages/eb/dd/2d9fdb07cebdf3d51179730afb7d5e576153c6744c3ff8fded23030c204e/cryptography-46.0.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:3b4995dc971c9fb83c25aa44cf45f02ba86f71ee600d81091c2f0cbae116b06c", size = 3476964, upload-time = "2026-02-10T19:18:20.687Z" }, + { url = "https://files.pythonhosted.org/packages/e9/6f/6cc6cc9955caa6eaf83660b0da2b077c7fe8ff9950a3c5e45d605038d439/cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:bc84e875994c3b445871ea7181d424588171efec3e185dced958dad9e001950a", size = 4218321, upload-time = "2026-02-10T19:18:22.349Z" }, + { url = "https://files.pythonhosted.org/packages/3e/5d/c4da701939eeee699566a6c1367427ab91a8b7088cc2328c09dbee940415/cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:2ae6971afd6246710480e3f15824ed3029a60fc16991db250034efd0b9fb4356", size = 4381786, upload-time = "2026-02-10T19:18:24.529Z" }, + { url = "https://files.pythonhosted.org/packages/ac/97/a538654732974a94ff96c1db621fa464f455c02d4bb7d2652f4edc21d600/cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:d861ee9e76ace6cf36a6a89b959ec08e7bc2493ee39d07ffe5acb23ef46d27da", size = 4217990, upload-time = "2026-02-10T19:18:25.957Z" }, + { url = "https://files.pythonhosted.org/packages/ae/11/7e500d2dd3ba891197b9efd2da5454b74336d64a7cc419aa7327ab74e5f6/cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:2b7a67c9cd56372f3249b39699f2ad479f6991e62ea15800973b956f4b73e257", size = 4381252, upload-time = "2026-02-10T19:18:27.496Z" }, + { url = 
"https://files.pythonhosted.org/packages/bc/58/6b3d24e6b9bc474a2dcdee65dfd1f008867015408a271562e4b690561a4d/cryptography-46.0.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:8456928655f856c6e1533ff59d5be76578a7157224dbd9ce6872f25055ab9ab7", size = 3407605, upload-time = "2026-02-10T19:18:29.233Z" }, ] [[package]] @@ -823,10 +846,19 @@ wheels = [ [[package]] name = "cuda-pathfinder" -version = "1.3.3" +version = "1.3.4" source = { registry = "https://pypi.org/simple" } wheels = [ - { url = "https://files.pythonhosted.org/packages/0b/02/4dbe7568a42e46582248942f54dc64ad094769532adbe21e525e4edf7bc4/cuda_pathfinder-1.3.3-py3-none-any.whl", hash = "sha256:9984b664e404f7c134954a771be8775dfd6180ea1e1aef4a5a37d4be05d9bbb1", size = 27154, upload-time = "2025-12-04T22:35:08.996Z" }, + { url = "https://files.pythonhosted.org/packages/b8/5e/db279a3bfbd18d59d0598922a3b3c1454908d0969e8372260afec9736376/cuda_pathfinder-1.3.4-py3-none-any.whl", hash = "sha256:fb983f6e0d43af27ef486e14d5989b5f904ef45cedf40538bfdcbffa6bb01fb2", size = 30878, upload-time = "2026-02-11T18:50:31.008Z" }, +] + +[[package]] +name = "defusedxml" +version = "0.7.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0f/d5/c66da9b79e5bdb124974bfe172b4daf3c984ebd9c2a06e2b8a4dc7331c72/defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69", size = 75520, upload-time = "2021-03-08T10:59:26.269Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/6c/aa3f2f849e01cb6a001cd8554a88d4c77c5c1a31c95bdf1cf9301e6d9ef4/defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61", size = 25604, upload-time = "2021-03-08T10:59:24.45Z" }, ] [[package]] @@ -858,12 +890,13 @@ wheels = [ [[package]] name = "docling" -version = "2.72.0" +version = "2.74.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "accelerate" }, { name = "beautifulsoup4" }, { name = "certifi" }, + { name = "defusedxml" }, { name = "docling-core", extra = ["chunking"] }, { name = "docling-ibm-models" }, { name = "docling-parse" }, @@ -891,16 +924,17 @@ dependencies = [ { name = "tqdm" }, { name = "typer" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/32/01/4ac01f90e56297a8476399df2ec31b26e073a20b20aa5641a4b8f080a2f4/docling-2.72.0.tar.gz", hash = "sha256:3edd48bb7b6e5737647441b96fba8811f69cac495883fa28dbf2444a0322cabd", size = 284463, upload-time = "2026-02-03T15:09:42.752Z" } +sdist = { url = "https://files.pythonhosted.org/packages/e6/77/8a3159ab194be676fe0b02e492545989001a0eeda5819a1039394bac891b/docling-2.74.0.tar.gz", hash = "sha256:107e7e0854c1ecb56c6381a07f87d587c3606386ca56f0d22917cb49e1349468", size = 346532, upload-time = "2026-02-17T21:17:54.87Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c4/be/e740484ca9d223a4bc740e834b07bfc4f28e0223c2b513f7012d4d7e00ab/docling-2.72.0-py3-none-any.whl", hash = "sha256:261a5fdfa3276783c18a29aa0ec623b259450765421bebeec87de523f07adcf3", size = 304007, upload-time = "2026-02-03T15:09:41.311Z" }, + { url = "https://files.pythonhosted.org/packages/d5/32/005d950fc606d58c8702e9d15fdbe7c92e4e97bdd93216402e8cc68c8f93/docling-2.74.0-py3-none-any.whl", hash = "sha256:931e9a522cd295cf523eebc3b52e507c7cf8709e0e7e9a7a84192051f3d62848", size = 370414, upload-time = "2026-02-17T21:17:53.501Z" }, ] [[package]] name = "docling-core" -version = "2.63.0" +version = "2.65.1" source = { registry = 
"https://pypi.org/simple" } dependencies = [ + { name = "defusedxml" }, { name = "jsonref" }, { name = "jsonschema" }, { name = "latex2mathml" }, @@ -912,9 +946,9 @@ dependencies = [ { name = "typer" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d3/76/f6a1333c0ce4c20e60358185ff8b7fa92e1e1561a43a6788e7c8aaa9898e/docling_core-2.63.0.tar.gz", hash = "sha256:946cf97f27cb81a2c6507121045a356be91e40b5a06bbaf028ca7036df78b2f1", size = 251016, upload-time = "2026-02-03T14:41:07.158Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ca/f0/54e61a05728f6e44f45092ab115b11b24464b64274f8a5fe8fcfe90ac70d/docling_core-2.65.1.tar.gz", hash = "sha256:3a143adb9cc613c503380eff92f5895078fc5a00fc7264f327d0d85ff60176cd", size = 253164, upload-time = "2026-02-13T12:23:06.472Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8b/c4/0c825b46412f088828dd2730d231c745d1ff4b5537eed292e827103eff37/docling_core-2.63.0-py3-none-any.whl", hash = "sha256:8f39167bf17da13225c8a67d23df98c87a74e2ab39762dbf51fab93d9b90de25", size = 238637, upload-time = "2026-02-03T14:41:05.55Z" }, + { url = "https://files.pythonhosted.org/packages/54/ee/2a450f8cf9a153dd080551b1ff09d45f200d629c30917b2047ecb8e1f6b6/docling_core-2.65.1-py3-none-any.whl", hash = "sha256:fcdb30254bc5046b52b8bec5919de3b6cdbeed915399cede5351ff328cdd020d", size = 240086, upload-time = "2026-02-13T12:23:04.289Z" }, ] [package.optional-dependencies] @@ -955,7 +989,7 @@ wheels = [ [[package]] name = "docling-parse" -version = "4.7.3" +version = "5.3.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "docling-core" }, @@ -964,29 +998,28 @@ dependencies = [ { name = "pywin32", marker = "sys_platform == 'win32'" }, { name = "tabulate" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/bb/7a/653c3b11920113217724fab9b4740f9f8964864f92a2a27590accecec5ac/docling_parse-4.7.3.tar.gz", hash = "sha256:5936e6bcb7969c2a13f38ecc75cada3b0919422dc845e96da4b0b7b3bbc394ce", size = 67646746, upload-time = "2026-01-14T14:18:19.376Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/6d/21/98decb689c173763f9a089e221c68b36d7b67ace0759f8eb2c9ca4b98dd5/docling_parse-4.7.3-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:65e0653d9617d38e73bab069dc3e7960668ff4a6b0ff45a7635c3790eeed8a08", size = 14614450, upload-time = "2026-01-14T14:17:21.626Z" }, - { url = "https://files.pythonhosted.org/packages/b2/88/c7642d019b6932b294ac3aae0208b2998fc0b7690473d12b1aa56636c99f/docling_parse-4.7.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:978e7e7032760385264896871ae87cb3a04081766cc966c57e9750ce803162ac", size = 15063165, upload-time = "2026-01-14T14:17:24.337Z" }, - { url = "https://files.pythonhosted.org/packages/df/3d/a169dd9de8ed5f8edae2bbfd6528306ece67994813224bb0da7a6f694a5f/docling_parse-4.7.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1790e7e4ae202d67875c1c48fd6f8ef5c51d10b0c23157e4989b8673f2f31308", size = 15136333, upload-time = "2026-01-14T14:17:26.21Z" }, - { url = "https://files.pythonhosted.org/packages/aa/b5/b600c4a040f57b7876878550551a8a92000ffedc58f716c384e1a09ec085/docling_parse-4.7.3-cp310-cp310-win_amd64.whl", hash = "sha256:5fc8f4770f9f6f90ba25f52451864a64394ddb158aea3a8fdda46a208c029cf6", size = 16144041, upload-time = "2026-01-14T14:17:28.108Z" }, - { url = 
"https://files.pythonhosted.org/packages/6c/81/dd317e0bce475153dc08a60a9a8615b1a04d4d3c9803175e6cb7b7e9b49b/docling_parse-4.7.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:66896bbe925073e4d48f18ec29dcd611a390d6b2378fae72125e77b020cd5664", size = 14615974, upload-time = "2026-01-14T14:17:30.246Z" }, - { url = "https://files.pythonhosted.org/packages/3a/b5/088590e0b32fd0a393ca419c644d1435a1c99fa6b2a87888eef4d0fdea33/docling_parse-4.7.3-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:281347b3e937c1a5ffa6f8774ee603b64a0899fe8a6885573dec7eb48a3421d8", size = 14981051, upload-time = "2026-01-14T14:17:32.426Z" }, - { url = "https://files.pythonhosted.org/packages/b7/63/2b6c9127924487573d5419d58ec77955f0b7c0a923c8232ad461d71039aa/docling_parse-4.7.3-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d3d86c51f9ce35a1b40b2f410f7271d9bd5fc58e7240f4cae7fdd2cef757e671", size = 15092586, upload-time = "2026-01-14T14:17:34.634Z" }, - { url = "https://files.pythonhosted.org/packages/af/89/ed27a83eb113bdf0b0f82f3c30a0db3c005df58b236f6487b232dacdb57a/docling_parse-4.7.3-cp311-cp311-win_amd64.whl", hash = "sha256:3b04459cc97a8a4929622e341b9981e23987a63af07db599afc5e1c4d389060b", size = 16144866, upload-time = "2026-01-14T14:17:36.742Z" }, - { url = "https://files.pythonhosted.org/packages/d6/26/9d86ae12699a25b7233f76ce062253e9c14e57781e00166b792b3a9d56db/docling_parse-4.7.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:d89231aa4fba3e38b80c11beb8edc07569e934c1f3935b51f57904fefe958ba5", size = 14616739, upload-time = "2026-01-14T14:17:38.567Z" }, - { url = "https://files.pythonhosted.org/packages/f2/fd/1aebb8a7f15d658f3be858ddbbc4ef7206089d540a7df0dcd4b846b99901/docling_parse-4.7.3-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dffd19ed373b0da5cea124606b183489a8686c3d18643e94485be1bdda5713ea", size = 14980782, upload-time = "2026-01-14T14:17:40.659Z" }, - { url = "https://files.pythonhosted.org/packages/3e/47/a722527c9f89c65f69f8a463be4f12ad73bae18132f29d8de8b2d9f6f082/docling_parse-4.7.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dc32b6f25a673e41b9a8112b6b841284f60dbac9427b7848a03b435460f74aee", size = 15092450, upload-time = "2026-01-14T14:17:42.838Z" }, - { url = "https://files.pythonhosted.org/packages/91/c7/316373a92ba42c2aeaee128fc77a34333449fe3e820b9d524e0ee396ea35/docling_parse-4.7.3-cp312-cp312-win_amd64.whl", hash = "sha256:ef691045623863624f2cb7347572d0262a53cb84940ef7dd851d9f13a2eb8833", size = 16147359, upload-time = "2026-01-14T14:17:44.906Z" }, - { url = "https://files.pythonhosted.org/packages/c9/9f/b62390c85f99436fd0c40cfcdfea2b553482696ca735e4cc0eee96b765aa/docling_parse-4.7.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:6cb4fe8c62de06b70e6b38c4bd608f41ea3e9d7154a4e05f9a3c4d8944fe3a25", size = 14616910, upload-time = "2026-01-14T14:17:47.146Z" }, - { url = "https://files.pythonhosted.org/packages/15/c4/a18d70118ff26b12021effab53d2ffe0c7e6ef378e92c35941b5557529c1/docling_parse-4.7.3-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9d18a5b1f7eecabed631c497a19f19d281a0d86f24bfe5d239e3df89bdc4df32", size = 14981477, upload-time = "2026-01-14T14:17:49.659Z" }, - { url = "https://files.pythonhosted.org/packages/cf/e6/899f033d80cb2b4e182226c73c6e91660df42e8867b76a04f0c024db7cb6/docling_parse-4.7.3-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:f4a93f91f97055e19cade33bb957d83f8615f1d2a0103b89827aca16b31a3e22", size = 15092546, upload-time = "2026-01-14T14:17:51.6Z" }, - { url = "https://files.pythonhosted.org/packages/95/f3/6dbd2e9c018b44ffe1de3d0a1ea1b017ee25b2a2f21934495710beb6d4d7/docling_parse-4.7.3-cp313-cp313-win_amd64.whl", hash = "sha256:c5a416ae2e1761914ee8d7dbfbe3858e106c876b5a7fccaa3917c038e2f126ec", size = 16147305, upload-time = "2026-01-14T14:17:53.925Z" }, - { url = "https://files.pythonhosted.org/packages/c5/73/d07d205b82d516db32346a9cb833716b4b39e0c37118d50592e8d85adcd1/docling_parse-4.7.3-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:53bd45241dca228715800afa0f96fdc826f7c234e9effcd5cefc86026ff19301", size = 14617441, upload-time = "2026-01-14T14:17:56.315Z" }, - { url = "https://files.pythonhosted.org/packages/0a/ae/b970af23daeb3be24241044a810197b0ddffb8d4d2d451e6dc6669b086e4/docling_parse-4.7.3-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ca64977a19ecd580a48f22137a30470d7ccf0995b2c25a74136c6facec7c617d", size = 14981828, upload-time = "2026-01-14T14:17:59.147Z" }, - { url = "https://files.pythonhosted.org/packages/4e/69/b0732d6b47e80c9108ed8c8ed1db880beddac3a49d68f5f5e853a90553c9/docling_parse-4.7.3-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:29c91f78c877ae4637011efdb478f20a571e6794be924795b3469958a6401cd6", size = 15092644, upload-time = "2026-01-14T14:18:01.05Z" }, - { url = "https://files.pythonhosted.org/packages/93/2e/7ae85c9ea1e75cf485f5e2af39bf1706c49570f8856b6c345098d25a9078/docling_parse-4.7.3-cp314-cp314-win_amd64.whl", hash = "sha256:75522790df921b6be5d86cf26d184a4af97c1c65e2d22698a9516bc049c398cf", size = 16787387, upload-time = "2026-01-14T14:18:03.353Z" }, - { url = "https://files.pythonhosted.org/packages/4c/58/bcf78e156bf261de21c2ab2843f60aefd0b15217af69756a2ff0cd8287f5/docling_parse-4.7.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a6e0f9e18d808c87ce0fe1900c74a3496a42743f4bba7ed4dd83a0e6e168644a", size = 18061956, upload-time = "2026-01-14T14:18:12.96Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/51/ed/f85c8885c91e7004aff287610c1bd08fb7262cb61c1a5ca644f85315acdc/docling_parse-5.3.2.tar.gz", hash = "sha256:84e619ddb819c7fd84387ebaa195f5dfb07b52a992ad18181c47c63741161f07", size = 55397956, upload-time = "2026-02-17T16:56:42.396Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/f1/76b47276251b7dcbf9740f5648a0b6baebbbd72a7dca5176ab4e38152cf0/docling_parse-5.3.2-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:2b1f942a4877f833d4aec0a0de9a31c9e6ca4af60b6242fe14c1b4fc31032438", size = 7760922, upload-time = "2026-02-17T16:56:08.165Z" }, + { url = "https://files.pythonhosted.org/packages/dc/23/a0d2d2e94bca7d83a51c6191b4c8cf6adc0826ea498f211deb7d0a0e8268/docling_parse-5.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:483cb1c95db1d9a4533f872c3b52276dc03813bb1489e265cd5b13671238860f", size = 8210471, upload-time = "2026-02-17T16:56:09.603Z" }, + { url = "https://files.pythonhosted.org/packages/e4/ca/e9950f938ca409c95282a3bacae7359141027e0859dbf5501cb4d940a616/docling_parse-5.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76154e72236bdb888c08394316b040cb988e252a803c9b69bd4262548f304c9e", size = 8267773, upload-time = "2026-02-17T16:56:11.11Z" }, + { url = "https://files.pythonhosted.org/packages/98/5c/fced801964e0ef9bd0d460f78069d4c75ee17c26e93190c8b3591943c928/docling_parse-5.3.2-cp310-cp310-win_amd64.whl", hash = 
"sha256:1d6751be74973545725b8d2e9cc8c0a908633da9eb39de6e62b7a0a08d16b2df", size = 9167411, upload-time = "2026-02-17T16:56:12.571Z" }, + { url = "https://files.pythonhosted.org/packages/51/c3/1e49b10fa536484e1c94830c01c089271985d7a2d68719eb1bf13398a6e8/docling_parse-5.3.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:12e6683e7eeef15e17e7dbd5ae8b4b6fb636f1f7d4da01524982a25a0e190bfa", size = 7761864, upload-time = "2026-02-17T16:56:15.492Z" }, + { url = "https://files.pythonhosted.org/packages/a1/01/4387850742a031490701acd4f2e6967f258644d2d9ae383b5dc42b47d8da/docling_parse-5.3.2-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0da5304ecefbdaadc1a0adbf03e49358c8be833326efd40b9a2f8f3ef8f8d73c", size = 8164019, upload-time = "2026-02-17T16:56:17.89Z" }, + { url = "https://files.pythonhosted.org/packages/85/1d/507559ab33cec293a28115ce33b28edd074375202b46ae2125d8f65d8875/docling_parse-5.3.2-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:31dae6b7fec33470b6a956f42c37962fff86e125fc4ede13ec1d3b7c59265be6", size = 8257803, upload-time = "2026-02-17T16:56:19.652Z" }, + { url = "https://files.pythonhosted.org/packages/2d/65/2770b00c6e5d01ce05e3d7df631a586c50dca8421247785e65ee449c862b/docling_parse-5.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:d6af84e4c463fd80502f35cc3182a14a0d2178a26bf21b612dfa9b0840bfc784", size = 9168840, upload-time = "2026-02-17T16:56:20.912Z" }, + { url = "https://files.pythonhosted.org/packages/10/66/2358f8f1991c926ae4bae6141a72c74fc56f15095d5448173016f391eb29/docling_parse-5.3.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:936631ddf5e4645bcae30adf37360a4ff52e5c92736781bab79922b7a80cd1ae", size = 7762664, upload-time = "2026-02-17T16:56:22.645Z" }, + { url = "https://files.pythonhosted.org/packages/b7/70/7a63c12333806889b3767e517be6fa2dc5a44c75639be967d9284b2ca9ce/docling_parse-5.3.2-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cdd615dae09d63b78ce40cf60c16a1082564ebd995fc606f09d592bdac70bb62", size = 8164369, upload-time = "2026-02-17T16:56:24.641Z" }, + { url = "https://files.pythonhosted.org/packages/2e/a1/3b997636dde5961e857572b90552f18322424ce5c53dd29da423388a0fce/docling_parse-5.3.2-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cf1233d93a007c6debea3030eea6517c06ea744ff211d7795910c53b346487b3", size = 8256838, upload-time = "2026-02-17T16:56:26.017Z" }, + { url = "https://files.pythonhosted.org/packages/b3/60/e45bb811488f28603529fbc8ecca2d4515a92beb11bafadfc4c8d283b8e3/docling_parse-5.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:567744907fa4abc7f2900ecb9f2ed19c8d6a312b22e94d8b6b301eaf23d48b53", size = 9170363, upload-time = "2026-02-17T16:56:27.469Z" }, + { url = "https://files.pythonhosted.org/packages/30/2f/be7ae5a628c16cdad047df6f5578ed06360e4f6c76e54e5ba4bbb583b8f4/docling_parse-5.3.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:f56942621a133a2754ca7944ebf236107fc06e90941b465adcfe4ee09dcbe548", size = 7762603, upload-time = "2026-02-17T16:56:29.179Z" }, + { url = "https://files.pythonhosted.org/packages/4a/a6/92d58510fb193403f6eb8be133a86df05f6ba809a9081fe486f446f3a965/docling_parse-5.3.2-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:acaa098cb85d17895cdcaab4208e4a9b0fd16c395c94207b002d064a18856236", size = 8164640, upload-time = "2026-02-17T16:56:31.101Z" }, + { url = 
"https://files.pythonhosted.org/packages/2f/82/16c058601758e8778df202e5326aecd109a6ea0351963e665b03cc94b3a7/docling_parse-5.3.2-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e35a8609ef5afe32e525c74d1a4535486ff74b323b0b403462577f2aef13b90", size = 8257245, upload-time = "2026-02-17T16:56:32.505Z" }, + { url = "https://files.pythonhosted.org/packages/2f/3f/2b641a63dacdf65d4a2732b5dbcfa81685041b671662af162c1c4913bb63/docling_parse-5.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:e53e09303146c5a7f089123e86d609f7d611e084a0ba1811041f49cdc318e83c", size = 9170273, upload-time = "2026-02-17T16:56:33.821Z" }, + { url = "https://files.pythonhosted.org/packages/6f/ab/344aa1951cd0de6a0582f0d3371c3bf5a4dd650efa4abd3f96b9827355de/docling_parse-5.3.2-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:743083c57ed320727ba0a87682300b1d008529012bc781b57e08f248649e6a81", size = 7763166, upload-time = "2026-02-17T16:56:35.49Z" }, + { url = "https://files.pythonhosted.org/packages/f3/d4/d1548864507ce803f064167809b78261530aff00d4e89bf9a9e26574e580/docling_parse-5.3.2-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:63c6fa9d579bfd9d5fed81313a77deedb716b321371277a656164a840a1e1030", size = 8164612, upload-time = "2026-02-17T16:56:36.87Z" }, + { url = "https://files.pythonhosted.org/packages/9c/64/cb6fd85cdd8e285300af37903bcfb4f554fd67c4a0d31889cbd21f698ce1/docling_parse-5.3.2-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0315fdcd21dc561510ba72ceab2b935376007b6a736c23099ccc260d25bb1ec5", size = 8257277, upload-time = "2026-02-17T16:56:38.298Z" }, + { url = "https://files.pythonhosted.org/packages/7f/29/23898ead473f60e196997922857971a194f5996fb8c18ab58f0b2132af73/docling_parse-5.3.2-cp314-cp314-win_amd64.whl", hash = "sha256:80d4b7e5e630982241b1794d3f0f707b5be6cf933d3e449e8c7f90bdbe5601be", size = 9529245, upload-time = "2026-02-17T16:56:40.186Z" }, ] [[package]] @@ -1066,11 +1099,11 @@ wheels = [ [[package]] name = "filelock" -version = "3.20.3" +version = "3.24.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/1d/65/ce7f1b70157833bf3cb851b556a37d4547ceafc158aa9b34b36782f23696/filelock-3.20.3.tar.gz", hash = "sha256:18c57ee915c7ec61cff0ecf7f0f869936c7c30191bb0cf406f1341778d0834e1", size = 19485, upload-time = "2026-01-09T17:55:05.421Z" } +sdist = { url = "https://files.pythonhosted.org/packages/02/a8/dae62680be63cbb3ff87cfa2f51cf766269514ea5488479d42fec5aa6f3a/filelock-3.24.2.tar.gz", hash = "sha256:c22803117490f156e59fafce621f0550a7a853e2bbf4f87f112b11d469b6c81b", size = 37601, upload-time = "2026-02-16T02:50:45.614Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b5/36/7fb70f04bf00bc646cd5bb45aa9eddb15e19437a28b8fb2b4a5249fac770/filelock-3.20.3-py3-none-any.whl", hash = "sha256:4b0dda527ee31078689fc205ec4f1c1bf7d56cf88b6dc9426c4f230e46c2dce1", size = 16701, upload-time = "2026-01-09T17:55:04.334Z" }, + { url = "https://files.pythonhosted.org/packages/e7/04/a94ebfb4eaaa08db56725a40de2887e95de4e8641b9e902c311bfa00aa39/filelock-3.24.2-py3-none-any.whl", hash = "sha256:667d7dc0b7d1e1064dd5f8f8e80bdac157a6482e8d2e02cd16fd3b6b33bd6556", size = 24152, upload-time = "2026-02-16T02:50:44Z" }, ] [[package]] @@ -1339,7 +1372,7 @@ wheels = [ [[package]] name = "google-genai" -version = "1.62.0" +version = "1.63.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -1353,9 +1386,9 @@ dependencies = [ { name = 
"typing-extensions" }, { name = "websockets" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/94/4c/71b32b5c8db420cf2fd0d5ef8a672adbde97d85e5d44a0b4fca712264ef1/google_genai-1.62.0.tar.gz", hash = "sha256:709468a14c739a080bc240a4f3191df597bf64485b1ca3728e0fb67517774c18", size = 490888, upload-time = "2026-02-04T22:48:41.989Z" } +sdist = { url = "https://files.pythonhosted.org/packages/46/d7/07ec5dadd0741f09e89f3ff5f0ce051ce2aa3a76797699d661dc88def077/google_genai-1.63.0.tar.gz", hash = "sha256:dc76cab810932df33cbec6c7ef3ce1538db5bef27aaf78df62ac38666c476294", size = 491970, upload-time = "2026-02-11T23:46:28.472Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/09/5f/4645d8a28c6e431d0dd6011003a852563f3da7037d36af53154925b099fd/google_genai-1.62.0-py3-none-any.whl", hash = "sha256:4c3daeff3d05fafee4b9a1a31f9c07f01bc22051081aa58b4d61f58d16d1bcc0", size = 724166, upload-time = "2026-02-04T22:48:39.956Z" }, + { url = "https://files.pythonhosted.org/packages/82/c8/ba32159e553fab787708c612cf0c3a899dafe7aca81115d841766e3bfe69/google_genai-1.63.0-py3-none-any.whl", hash = "sha256:6206c13fc20f332703ca7375bea7c191c82f95d6781c29936c6982d86599b359", size = 724747, upload-time = "2026-02-11T23:46:26.697Z" }, ] [[package]] @@ -1757,75 +1790,87 @@ wheels = [ [[package]] name = "librt" -version = "0.7.8" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e7/24/5f3646ff414285e0f7708fa4e946b9bf538345a41d1c375c439467721a5e/librt-0.7.8.tar.gz", hash = "sha256:1a4ede613941d9c3470b0368be851df6bb78ab218635512d0370b27a277a0862", size = 148323, upload-time = "2026-01-14T12:56:16.876Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/44/13/57b06758a13550c5f09563893b004f98e9537ee6ec67b7df85c3571c8832/librt-0.7.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b45306a1fc5f53c9330fbee134d8b3227fe5da2ab09813b892790400aa49352d", size = 56521, upload-time = "2026-01-14T12:54:40.066Z" }, - { url = "https://files.pythonhosted.org/packages/c2/24/bbea34d1452a10612fb45ac8356f95351ba40c2517e429602160a49d1fd0/librt-0.7.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:864c4b7083eeee250ed55135d2127b260d7eb4b5e953a9e5df09c852e327961b", size = 58456, upload-time = "2026-01-14T12:54:41.471Z" }, - { url = "https://files.pythonhosted.org/packages/04/72/a168808f92253ec3a810beb1eceebc465701197dbc7e865a1c9ceb3c22c7/librt-0.7.8-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:6938cc2de153bc927ed8d71c7d2f2ae01b4e96359126c602721340eb7ce1a92d", size = 164392, upload-time = "2026-01-14T12:54:42.843Z" }, - { url = "https://files.pythonhosted.org/packages/14/5c/4c0d406f1b02735c2e7af8ff1ff03a6577b1369b91aa934a9fa2cc42c7ce/librt-0.7.8-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:66daa6ac5de4288a5bbfbe55b4caa7bf0cd26b3269c7a476ffe8ce45f837f87d", size = 172959, upload-time = "2026-01-14T12:54:44.602Z" }, - { url = "https://files.pythonhosted.org/packages/82/5f/3e85351c523f73ad8d938989e9a58c7f59fb9c17f761b9981b43f0025ce7/librt-0.7.8-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4864045f49dc9c974dadb942ac56a74cd0479a2aafa51ce272c490a82322ea3c", size = 186717, upload-time = "2026-01-14T12:54:45.986Z" }, - { url = "https://files.pythonhosted.org/packages/08/f8/18bfe092e402d00fe00d33aa1e01dda1bd583ca100b393b4373847eade6d/librt-0.7.8-cp310-cp310-musllinux_1_2_aarch64.whl", hash = 
"sha256:a36515b1328dc5b3ffce79fe204985ca8572525452eacabee2166f44bb387b2c", size = 184585, upload-time = "2026-01-14T12:54:47.139Z" }, - { url = "https://files.pythonhosted.org/packages/4e/fc/f43972ff56fd790a9fa55028a52ccea1875100edbb856b705bd393b601e3/librt-0.7.8-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b7e7f140c5169798f90b80d6e607ed2ba5059784968a004107c88ad61fb3641d", size = 180497, upload-time = "2026-01-14T12:54:48.946Z" }, - { url = "https://files.pythonhosted.org/packages/e1/3a/25e36030315a410d3ad0b7d0f19f5f188e88d1613d7d3fd8150523ea1093/librt-0.7.8-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ff71447cb778a4f772ddc4ce360e6ba9c95527ed84a52096bd1bbf9fee2ec7c0", size = 200052, upload-time = "2026-01-14T12:54:50.382Z" }, - { url = "https://files.pythonhosted.org/packages/fc/b8/f3a5a1931ae2a6ad92bf6893b9ef44325b88641d58723529e2c2935e8abe/librt-0.7.8-cp310-cp310-win32.whl", hash = "sha256:047164e5f68b7a8ebdf9fae91a3c2161d3192418aadd61ddd3a86a56cbe3dc85", size = 43477, upload-time = "2026-01-14T12:54:51.815Z" }, - { url = "https://files.pythonhosted.org/packages/fe/91/c4202779366bc19f871b4ad25db10fcfa1e313c7893feb942f32668e8597/librt-0.7.8-cp310-cp310-win_amd64.whl", hash = "sha256:d6f254d096d84156a46a84861183c183d30734e52383602443292644d895047c", size = 49806, upload-time = "2026-01-14T12:54:53.149Z" }, - { url = "https://files.pythonhosted.org/packages/1b/a3/87ea9c1049f2c781177496ebee29430e4631f439b8553a4969c88747d5d8/librt-0.7.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ff3e9c11aa260c31493d4b3197d1e28dd07768594a4f92bec4506849d736248f", size = 56507, upload-time = "2026-01-14T12:54:54.156Z" }, - { url = "https://files.pythonhosted.org/packages/5e/4a/23bcef149f37f771ad30203d561fcfd45b02bc54947b91f7a9ac34815747/librt-0.7.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ddb52499d0b3ed4aa88746aaf6f36a08314677d5c346234c3987ddc506404eac", size = 58455, upload-time = "2026-01-14T12:54:55.978Z" }, - { url = "https://files.pythonhosted.org/packages/22/6e/46eb9b85c1b9761e0f42b6e6311e1cc544843ac897457062b9d5d0b21df4/librt-0.7.8-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:e9c0afebbe6ce177ae8edba0c7c4d626f2a0fc12c33bb993d163817c41a7a05c", size = 164956, upload-time = "2026-01-14T12:54:57.311Z" }, - { url = "https://files.pythonhosted.org/packages/7a/3f/aa7c7f6829fb83989feb7ba9aa11c662b34b4bd4bd5b262f2876ba3db58d/librt-0.7.8-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:631599598e2c76ded400c0a8722dec09217c89ff64dc54b060f598ed68e7d2a8", size = 174364, upload-time = "2026-01-14T12:54:59.089Z" }, - { url = "https://files.pythonhosted.org/packages/3f/2d/d57d154b40b11f2cb851c4df0d4c4456bacd9b1ccc4ecb593ddec56c1a8b/librt-0.7.8-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9c1ba843ae20db09b9d5c80475376168feb2640ce91cd9906414f23cc267a1ff", size = 188034, upload-time = "2026-01-14T12:55:00.141Z" }, - { url = "https://files.pythonhosted.org/packages/59/f9/36c4dad00925c16cd69d744b87f7001792691857d3b79187e7a673e812fb/librt-0.7.8-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b5b007bb22ea4b255d3ee39dfd06d12534de2fcc3438567d9f48cdaf67ae1ae3", size = 186295, upload-time = "2026-01-14T12:55:01.303Z" }, - { url = "https://files.pythonhosted.org/packages/23/9b/8a9889d3df5efb67695a67785028ccd58e661c3018237b73ad081691d0cb/librt-0.7.8-cp311-cp311-musllinux_1_2_i686.whl", hash = 
"sha256:dbd79caaf77a3f590cbe32dc2447f718772d6eea59656a7dcb9311161b10fa75", size = 181470, upload-time = "2026-01-14T12:55:02.492Z" }, - { url = "https://files.pythonhosted.org/packages/43/64/54d6ef11afca01fef8af78c230726a9394759f2addfbf7afc5e3cc032a45/librt-0.7.8-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:87808a8d1e0bd62a01cafc41f0fd6818b5a5d0ca0d8a55326a81643cdda8f873", size = 201713, upload-time = "2026-01-14T12:55:03.919Z" }, - { url = "https://files.pythonhosted.org/packages/2d/29/73e7ed2991330b28919387656f54109139b49e19cd72902f466bd44415fd/librt-0.7.8-cp311-cp311-win32.whl", hash = "sha256:31724b93baa91512bd0a376e7cf0b59d8b631ee17923b1218a65456fa9bda2e7", size = 43803, upload-time = "2026-01-14T12:55:04.996Z" }, - { url = "https://files.pythonhosted.org/packages/3f/de/66766ff48ed02b4d78deea30392ae200bcbd99ae61ba2418b49fd50a4831/librt-0.7.8-cp311-cp311-win_amd64.whl", hash = "sha256:978e8b5f13e52cf23a9e80f3286d7546baa70bc4ef35b51d97a709d0b28e537c", size = 50080, upload-time = "2026-01-14T12:55:06.489Z" }, - { url = "https://files.pythonhosted.org/packages/6f/e3/33450438ff3a8c581d4ed7f798a70b07c3206d298cf0b87d3806e72e3ed8/librt-0.7.8-cp311-cp311-win_arm64.whl", hash = "sha256:20e3946863d872f7cabf7f77c6c9d370b8b3d74333d3a32471c50d3a86c0a232", size = 43383, upload-time = "2026-01-14T12:55:07.49Z" }, - { url = "https://files.pythonhosted.org/packages/56/04/79d8fcb43cae376c7adbab7b2b9f65e48432c9eced62ac96703bcc16e09b/librt-0.7.8-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:9b6943885b2d49c48d0cff23b16be830ba46b0152d98f62de49e735c6e655a63", size = 57472, upload-time = "2026-01-14T12:55:08.528Z" }, - { url = "https://files.pythonhosted.org/packages/b4/ba/60b96e93043d3d659da91752689023a73981336446ae82078cddf706249e/librt-0.7.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:46ef1f4b9b6cc364b11eea0ecc0897314447a66029ee1e55859acb3dd8757c93", size = 58986, upload-time = "2026-01-14T12:55:09.466Z" }, - { url = "https://files.pythonhosted.org/packages/7c/26/5215e4cdcc26e7be7eee21955a7e13cbf1f6d7d7311461a6014544596fac/librt-0.7.8-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:907ad09cfab21e3c86e8f1f87858f7049d1097f77196959c033612f532b4e592", size = 168422, upload-time = "2026-01-14T12:55:10.499Z" }, - { url = "https://files.pythonhosted.org/packages/0f/84/e8d1bc86fa0159bfc24f3d798d92cafd3897e84c7fea7fe61b3220915d76/librt-0.7.8-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2991b6c3775383752b3ca0204842743256f3ad3deeb1d0adc227d56b78a9a850", size = 177478, upload-time = "2026-01-14T12:55:11.577Z" }, - { url = "https://files.pythonhosted.org/packages/57/11/d0268c4b94717a18aa91df1100e767b010f87b7ae444dafaa5a2d80f33a6/librt-0.7.8-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:03679b9856932b8c8f674e87aa3c55ea11c9274301f76ae8dc4d281bda55cf62", size = 192439, upload-time = "2026-01-14T12:55:12.7Z" }, - { url = "https://files.pythonhosted.org/packages/8d/56/1e8e833b95fe684f80f8894ae4d8b7d36acc9203e60478fcae599120a975/librt-0.7.8-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3968762fec1b2ad34ce57458b6de25dbb4142713e9ca6279a0d352fa4e9f452b", size = 191483, upload-time = "2026-01-14T12:55:13.838Z" }, - { url = "https://files.pythonhosted.org/packages/17/48/f11cf28a2cb6c31f282009e2208312aa84a5ee2732859f7856ee306176d5/librt-0.7.8-cp312-cp312-musllinux_1_2_i686.whl", hash = 
"sha256:bb7a7807523a31f03061288cc4ffc065d684c39db7644c676b47d89553c0d714", size = 185376, upload-time = "2026-01-14T12:55:15.017Z" }, - { url = "https://files.pythonhosted.org/packages/b8/6a/d7c116c6da561b9155b184354a60a3d5cdbf08fc7f3678d09c95679d13d9/librt-0.7.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad64a14b1e56e702e19b24aae108f18ad1bf7777f3af5fcd39f87d0c5a814449", size = 206234, upload-time = "2026-01-14T12:55:16.571Z" }, - { url = "https://files.pythonhosted.org/packages/61/de/1975200bb0285fc921c5981d9978ce6ce11ae6d797df815add94a5a848a3/librt-0.7.8-cp312-cp312-win32.whl", hash = "sha256:0241a6ed65e6666236ea78203a73d800dbed896cf12ae25d026d75dc1fcd1dac", size = 44057, upload-time = "2026-01-14T12:55:18.077Z" }, - { url = "https://files.pythonhosted.org/packages/8e/cd/724f2d0b3461426730d4877754b65d39f06a41ac9d0a92d5c6840f72b9ae/librt-0.7.8-cp312-cp312-win_amd64.whl", hash = "sha256:6db5faf064b5bab9675c32a873436b31e01d66ca6984c6f7f92621656033a708", size = 50293, upload-time = "2026-01-14T12:55:19.179Z" }, - { url = "https://files.pythonhosted.org/packages/bd/cf/7e899acd9ee5727ad8160fdcc9994954e79fab371c66535c60e13b968ffc/librt-0.7.8-cp312-cp312-win_arm64.whl", hash = "sha256:57175aa93f804d2c08d2edb7213e09276bd49097611aefc37e3fa38d1fb99ad0", size = 43574, upload-time = "2026-01-14T12:55:20.185Z" }, - { url = "https://files.pythonhosted.org/packages/a1/fe/b1f9de2829cf7fc7649c1dcd202cfd873837c5cc2fc9e526b0e7f716c3d2/librt-0.7.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4c3995abbbb60b3c129490fa985dfe6cac11d88fc3c36eeb4fb1449efbbb04fc", size = 57500, upload-time = "2026-01-14T12:55:21.219Z" }, - { url = "https://files.pythonhosted.org/packages/eb/d4/4a60fbe2e53b825f5d9a77325071d61cd8af8506255067bf0c8527530745/librt-0.7.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:44e0c2cbc9bebd074cf2cdbe472ca185e824be4e74b1c63a8e934cea674bebf2", size = 59019, upload-time = "2026-01-14T12:55:22.256Z" }, - { url = "https://files.pythonhosted.org/packages/6a/37/61ff80341ba5159afa524445f2d984c30e2821f31f7c73cf166dcafa5564/librt-0.7.8-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:4d2f1e492cae964b3463a03dc77a7fe8742f7855d7258c7643f0ee32b6651dd3", size = 169015, upload-time = "2026-01-14T12:55:23.24Z" }, - { url = "https://files.pythonhosted.org/packages/1c/86/13d4f2d6a93f181ebf2fc953868826653ede494559da8268023fe567fca3/librt-0.7.8-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:451e7ffcef8f785831fdb791bd69211f47e95dc4c6ddff68e589058806f044c6", size = 178161, upload-time = "2026-01-14T12:55:24.826Z" }, - { url = "https://files.pythonhosted.org/packages/88/26/e24ef01305954fc4d771f1f09f3dd682f9eb610e1bec188ffb719374d26e/librt-0.7.8-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3469e1af9f1380e093ae06bedcbdd11e407ac0b303a56bbe9afb1d6824d4982d", size = 193015, upload-time = "2026-01-14T12:55:26.04Z" }, - { url = "https://files.pythonhosted.org/packages/88/a0/92b6bd060e720d7a31ed474d046a69bd55334ec05e9c446d228c4b806ae3/librt-0.7.8-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f11b300027ce19a34f6d24ebb0a25fd0e24a9d53353225a5c1e6cadbf2916b2e", size = 192038, upload-time = "2026-01-14T12:55:27.208Z" }, - { url = "https://files.pythonhosted.org/packages/06/bb/6f4c650253704279c3a214dad188101d1b5ea23be0606628bc6739456624/librt-0.7.8-cp313-cp313-musllinux_1_2_i686.whl", hash = 
"sha256:4adc73614f0d3c97874f02f2c7fd2a27854e7e24ad532ea6b965459c5b757eca", size = 186006, upload-time = "2026-01-14T12:55:28.594Z" }, - { url = "https://files.pythonhosted.org/packages/dc/00/1c409618248d43240cadf45f3efb866837fa77e9a12a71481912135eb481/librt-0.7.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:60c299e555f87e4c01b2eca085dfccda1dde87f5a604bb45c2906b8305819a93", size = 206888, upload-time = "2026-01-14T12:55:30.214Z" }, - { url = "https://files.pythonhosted.org/packages/d9/83/b2cfe8e76ff5c1c77f8a53da3d5de62d04b5ebf7cf913e37f8bca43b5d07/librt-0.7.8-cp313-cp313-win32.whl", hash = "sha256:b09c52ed43a461994716082ee7d87618096851319bf695d57ec123f2ab708951", size = 44126, upload-time = "2026-01-14T12:55:31.44Z" }, - { url = "https://files.pythonhosted.org/packages/a9/0b/c59d45de56a51bd2d3a401fc63449c0ac163e4ef7f523ea8b0c0dee86ec5/librt-0.7.8-cp313-cp313-win_amd64.whl", hash = "sha256:f8f4a901a3fa28969d6e4519deceab56c55a09d691ea7b12ca830e2fa3461e34", size = 50262, upload-time = "2026-01-14T12:55:33.01Z" }, - { url = "https://files.pythonhosted.org/packages/fc/b9/973455cec0a1ec592395250c474164c4a58ebf3e0651ee920fef1a2623f1/librt-0.7.8-cp313-cp313-win_arm64.whl", hash = "sha256:43d4e71b50763fcdcf64725ac680d8cfa1706c928b844794a7aa0fa9ac8e5f09", size = 43600, upload-time = "2026-01-14T12:55:34.054Z" }, - { url = "https://files.pythonhosted.org/packages/1a/73/fa8814c6ce2d49c3827829cadaa1589b0bf4391660bd4510899393a23ebc/librt-0.7.8-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:be927c3c94c74b05128089a955fba86501c3b544d1d300282cc1b4bd370cb418", size = 57049, upload-time = "2026-01-14T12:55:35.056Z" }, - { url = "https://files.pythonhosted.org/packages/53/fe/f6c70956da23ea235fd2e3cc16f4f0b4ebdfd72252b02d1164dd58b4e6c3/librt-0.7.8-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:7b0803e9008c62a7ef79058233db7ff6f37a9933b8f2573c05b07ddafa226611", size = 58689, upload-time = "2026-01-14T12:55:36.078Z" }, - { url = "https://files.pythonhosted.org/packages/1f/4d/7a2481444ac5fba63050d9abe823e6bc16896f575bfc9c1e5068d516cdce/librt-0.7.8-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:79feb4d00b2a4e0e05c9c56df707934f41fcb5fe53fd9efb7549068d0495b758", size = 166808, upload-time = "2026-01-14T12:55:37.595Z" }, - { url = "https://files.pythonhosted.org/packages/ac/3c/10901d9e18639f8953f57c8986796cfbf4c1c514844a41c9197cf87cb707/librt-0.7.8-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b9122094e3f24aa759c38f46bd8863433820654927370250f460ae75488b66ea", size = 175614, upload-time = "2026-01-14T12:55:38.756Z" }, - { url = "https://files.pythonhosted.org/packages/db/01/5cbdde0951a5090a80e5ba44e6357d375048123c572a23eecfb9326993a7/librt-0.7.8-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7e03bea66af33c95ce3addf87a9bf1fcad8d33e757bc479957ddbc0e4f7207ac", size = 189955, upload-time = "2026-01-14T12:55:39.939Z" }, - { url = "https://files.pythonhosted.org/packages/6a/b4/e80528d2f4b7eaf1d437fcbd6fc6ba4cbeb3e2a0cb9ed5a79f47c7318706/librt-0.7.8-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f1ade7f31675db00b514b98f9ab9a7698c7282dad4be7492589109471852d398", size = 189370, upload-time = "2026-01-14T12:55:41.057Z" }, - { url = "https://files.pythonhosted.org/packages/c1/ab/938368f8ce31a9787ecd4becb1e795954782e4312095daf8fd22420227c8/librt-0.7.8-cp314-cp314-musllinux_1_2_i686.whl", hash = 
"sha256:a14229ac62adcf1b90a15992f1ab9c69ae8b99ffb23cb64a90878a6e8a2f5b81", size = 183224, upload-time = "2026-01-14T12:55:42.328Z" }, - { url = "https://files.pythonhosted.org/packages/3c/10/559c310e7a6e4014ac44867d359ef8238465fb499e7eb31b6bfe3e3f86f5/librt-0.7.8-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5bcaaf624fd24e6a0cb14beac37677f90793a96864c67c064a91458611446e83", size = 203541, upload-time = "2026-01-14T12:55:43.501Z" }, - { url = "https://files.pythonhosted.org/packages/f8/db/a0db7acdb6290c215f343835c6efda5b491bb05c3ddc675af558f50fdba3/librt-0.7.8-cp314-cp314-win32.whl", hash = "sha256:7aa7d5457b6c542ecaed79cec4ad98534373c9757383973e638ccced0f11f46d", size = 40657, upload-time = "2026-01-14T12:55:44.668Z" }, - { url = "https://files.pythonhosted.org/packages/72/e0/4f9bdc2a98a798511e81edcd6b54fe82767a715e05d1921115ac70717f6f/librt-0.7.8-cp314-cp314-win_amd64.whl", hash = "sha256:3d1322800771bee4a91f3b4bd4e49abc7d35e65166821086e5afd1e6c0d9be44", size = 46835, upload-time = "2026-01-14T12:55:45.655Z" }, - { url = "https://files.pythonhosted.org/packages/f9/3d/59c6402e3dec2719655a41ad027a7371f8e2334aa794ed11533ad5f34969/librt-0.7.8-cp314-cp314-win_arm64.whl", hash = "sha256:5363427bc6a8c3b1719f8f3845ea53553d301382928a86e8fab7984426949bce", size = 39885, upload-time = "2026-01-14T12:55:47.138Z" }, - { url = "https://files.pythonhosted.org/packages/4e/9c/2481d80950b83085fb14ba3c595db56330d21bbc7d88a19f20165f3538db/librt-0.7.8-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:ca916919793a77e4a98d4a1701e345d337ce53be4a16620f063191f7322ac80f", size = 59161, upload-time = "2026-01-14T12:55:48.45Z" }, - { url = "https://files.pythonhosted.org/packages/96/79/108df2cfc4e672336765d54e3ff887294c1cc36ea4335c73588875775527/librt-0.7.8-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:54feb7b4f2f6706bb82325e836a01be805770443e2400f706e824e91f6441dde", size = 61008, upload-time = "2026-01-14T12:55:49.527Z" }, - { url = "https://files.pythonhosted.org/packages/46/f2/30179898f9994a5637459d6e169b6abdc982012c0a4b2d4c26f50c06f911/librt-0.7.8-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:39a4c76fee41007070f872b648cc2f711f9abf9a13d0c7162478043377b52c8e", size = 187199, upload-time = "2026-01-14T12:55:50.587Z" }, - { url = "https://files.pythonhosted.org/packages/b4/da/f7563db55cebdc884f518ba3791ad033becc25ff68eb70902b1747dc0d70/librt-0.7.8-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ac9c8a458245c7de80bc1b9765b177055efff5803f08e548dd4bb9ab9a8d789b", size = 198317, upload-time = "2026-01-14T12:55:51.991Z" }, - { url = "https://files.pythonhosted.org/packages/b3/6c/4289acf076ad371471fa86718c30ae353e690d3de6167f7db36f429272f1/librt-0.7.8-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:95b67aa7eff150f075fda09d11f6bfb26edffd300f6ab1666759547581e8f666", size = 210334, upload-time = "2026-01-14T12:55:53.682Z" }, - { url = "https://files.pythonhosted.org/packages/4a/7f/377521ac25b78ac0a5ff44127a0360ee6d5ddd3ce7327949876a30533daa/librt-0.7.8-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:535929b6eff670c593c34ff435d5440c3096f20fa72d63444608a5aef64dd581", size = 211031, upload-time = "2026-01-14T12:55:54.827Z" }, - { url = "https://files.pythonhosted.org/packages/c5/b1/e1e96c3e20b23d00cf90f4aad48f0deb4cdfec2f0ed8380d0d85acf98bbf/librt-0.7.8-cp314-cp314t-musllinux_1_2_i686.whl", hash = 
"sha256:63937bd0f4d1cb56653dc7ae900d6c52c41f0015e25aaf9902481ee79943b33a", size = 204581, upload-time = "2026-01-14T12:55:56.811Z" }, - { url = "https://files.pythonhosted.org/packages/43/71/0f5d010e92ed9747e14bef35e91b6580533510f1e36a8a09eb79ee70b2f0/librt-0.7.8-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cf243da9e42d914036fd362ac3fa77d80a41cadcd11ad789b1b5eec4daaf67ca", size = 224731, upload-time = "2026-01-14T12:55:58.175Z" }, - { url = "https://files.pythonhosted.org/packages/22/f0/07fb6ab5c39a4ca9af3e37554f9d42f25c464829254d72e4ebbd81da351c/librt-0.7.8-cp314-cp314t-win32.whl", hash = "sha256:171ca3a0a06c643bd0a2f62a8944e1902c94aa8e5da4db1ea9a8daf872685365", size = 41173, upload-time = "2026-01-14T12:55:59.315Z" }, - { url = "https://files.pythonhosted.org/packages/24/d4/7e4be20993dc6a782639625bd2f97f3c66125c7aa80c82426956811cfccf/librt-0.7.8-cp314-cp314t-win_amd64.whl", hash = "sha256:445b7304145e24c60288a2f172b5ce2ca35c0f81605f5299f3fa567e189d2e32", size = 47668, upload-time = "2026-01-14T12:56:00.261Z" }, - { url = "https://files.pythonhosted.org/packages/fc/85/69f92b2a7b3c0f88ffe107c86b952b397004b5b8ea5a81da3d9c04c04422/librt-0.7.8-cp314-cp314t-win_arm64.whl", hash = "sha256:8766ece9de08527deabcd7cb1b4f1a967a385d26e33e536d6d8913db6ef74f06", size = 40550, upload-time = "2026-01-14T12:56:01.542Z" }, +version = "0.8.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/56/9c/b4b0c54d84da4a94b37bd44151e46d5e583c9534c7e02250b961b1b6d8a8/librt-0.8.1.tar.gz", hash = "sha256:be46a14693955b3bd96014ccbdb8339ee8c9346fbe11c1b78901b55125f14c73", size = 177471, upload-time = "2026-02-17T16:13:06.101Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/5f/63f5fa395c7a8a93558c0904ba8f1c8d1b997ca6a3de61bc7659970d66bf/librt-0.8.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:81fd938344fecb9373ba1b155968c8a329491d2ce38e7ddb76f30ffb938f12dc", size = 65697, upload-time = "2026-02-17T16:11:06.903Z" }, + { url = "https://files.pythonhosted.org/packages/ff/e0/0472cf37267b5920eff2f292ccfaede1886288ce35b7f3203d8de00abfe6/librt-0.8.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5db05697c82b3a2ec53f6e72b2ed373132b0c2e05135f0696784e97d7f5d48e7", size = 68376, upload-time = "2026-02-17T16:11:08.395Z" }, + { url = "https://files.pythonhosted.org/packages/c8/be/8bd1359fdcd27ab897cd5963294fa4a7c83b20a8564678e4fd12157e56a5/librt-0.8.1-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d56bc4011975f7460bea7b33e1ff425d2f1adf419935ff6707273c77f8a4ada6", size = 197084, upload-time = "2026-02-17T16:11:09.774Z" }, + { url = "https://files.pythonhosted.org/packages/e2/fe/163e33fdd091d0c2b102f8a60cc0a61fd730ad44e32617cd161e7cd67a01/librt-0.8.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5cdc0f588ff4b663ea96c26d2a230c525c6fc62b28314edaaaca8ed5af931ad0", size = 207337, upload-time = "2026-02-17T16:11:11.311Z" }, + { url = "https://files.pythonhosted.org/packages/01/99/f85130582f05dcf0c8902f3d629270231d2f4afdfc567f8305a952ac7f14/librt-0.8.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:97c2b54ff6717a7a563b72627990bec60d8029df17df423f0ed37d56a17a176b", size = 219980, upload-time = "2026-02-17T16:11:12.499Z" }, + { url = 
"https://files.pythonhosted.org/packages/6f/54/cb5e4d03659e043a26c74e08206412ac9a3742f0477d96f9761a55313b5f/librt-0.8.1-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:8f1125e6bbf2f1657d9a2f3ccc4a2c9b0c8b176965bb565dd4d86be67eddb4b6", size = 212921, upload-time = "2026-02-17T16:11:14.484Z" }, + { url = "https://files.pythonhosted.org/packages/b1/81/a3a01e4240579c30f3487f6fed01eb4bc8ef0616da5b4ebac27ca19775f3/librt-0.8.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8f4bb453f408137d7581be309b2fbc6868a80e7ef60c88e689078ee3a296ae71", size = 221381, upload-time = "2026-02-17T16:11:17.459Z" }, + { url = "https://files.pythonhosted.org/packages/08/b0/fc2d54b4b1c6fb81e77288ff31ff25a2c1e62eaef4424a984f228839717b/librt-0.8.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c336d61d2fe74a3195edc1646d53ff1cddd3a9600b09fa6ab75e5514ba4862a7", size = 216714, upload-time = "2026-02-17T16:11:19.197Z" }, + { url = "https://files.pythonhosted.org/packages/96/96/85daa73ffbd87e1fb287d7af6553ada66bf25a2a6b0de4764344a05469f6/librt-0.8.1-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:eb5656019db7c4deacf0c1a55a898c5bb8f989be904597fcb5232a2f4828fa05", size = 214777, upload-time = "2026-02-17T16:11:20.443Z" }, + { url = "https://files.pythonhosted.org/packages/12/9c/c3aa7a2360383f4bf4f04d98195f2739a579128720c603f4807f006a4225/librt-0.8.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c25d9e338d5bed46c1632f851babf3d13c78f49a225462017cf5e11e845c5891", size = 237398, upload-time = "2026-02-17T16:11:22.083Z" }, + { url = "https://files.pythonhosted.org/packages/61/19/d350ea89e5274665185dabc4bbb9c3536c3411f862881d316c8b8e00eb66/librt-0.8.1-cp310-cp310-win32.whl", hash = "sha256:aaab0e307e344cb28d800957ef3ec16605146ef0e59e059a60a176d19543d1b7", size = 54285, upload-time = "2026-02-17T16:11:23.27Z" }, + { url = "https://files.pythonhosted.org/packages/4f/d6/45d587d3d41c112e9543a0093d883eb57a24a03e41561c127818aa2a6bcc/librt-0.8.1-cp310-cp310-win_amd64.whl", hash = "sha256:56e04c14b696300d47b3bc5f1d10a00e86ae978886d0cee14e5714fafb5df5d2", size = 61352, upload-time = "2026-02-17T16:11:24.207Z" }, + { url = "https://files.pythonhosted.org/packages/1d/01/0e748af5e4fee180cf7cd12bd12b0513ad23b045dccb2a83191bde82d168/librt-0.8.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:681dc2451d6d846794a828c16c22dc452d924e9f700a485b7ecb887a30aad1fd", size = 65315, upload-time = "2026-02-17T16:11:25.152Z" }, + { url = "https://files.pythonhosted.org/packages/9d/4d/7184806efda571887c798d573ca4134c80ac8642dcdd32f12c31b939c595/librt-0.8.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a3b4350b13cc0e6f5bec8fa7caf29a8fb8cdc051a3bae45cfbfd7ce64f009965", size = 68021, upload-time = "2026-02-17T16:11:26.129Z" }, + { url = "https://files.pythonhosted.org/packages/ae/88/c3c52d2a5d5101f28d3dc89298444626e7874aa904eed498464c2af17627/librt-0.8.1-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:ac1e7817fd0ed3d14fd7c5df91daed84c48e4c2a11ee99c0547f9f62fdae13da", size = 194500, upload-time = "2026-02-17T16:11:27.177Z" }, + { url = "https://files.pythonhosted.org/packages/d6/5d/6fb0a25b6a8906e85b2c3b87bee1d6ed31510be7605b06772f9374ca5cb3/librt-0.8.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:747328be0c5b7075cde86a0e09d7a9196029800ba75a1689332348e998fb85c0", size = 205622, upload-time = "2026-02-17T16:11:28.242Z" }, + { url = 
"https://files.pythonhosted.org/packages/b2/a6/8006ae81227105476a45691f5831499e4d936b1c049b0c1feb17c11b02d1/librt-0.8.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f0af2bd2bc204fa27f3d6711d0f360e6b8c684a035206257a81673ab924aa11e", size = 218304, upload-time = "2026-02-17T16:11:29.344Z" }, + { url = "https://files.pythonhosted.org/packages/ee/19/60e07886ad16670aae57ef44dada41912c90906a6fe9f2b9abac21374748/librt-0.8.1-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:d480de377f5b687b6b1bc0c0407426da556e2a757633cc7e4d2e1a057aa688f3", size = 211493, upload-time = "2026-02-17T16:11:30.445Z" }, + { url = "https://files.pythonhosted.org/packages/9c/cf/f666c89d0e861d05600438213feeb818c7514d3315bae3648b1fc145d2b6/librt-0.8.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d0ee06b5b5291f609ddb37b9750985b27bc567791bc87c76a569b3feed8481ac", size = 219129, upload-time = "2026-02-17T16:11:32.021Z" }, + { url = "https://files.pythonhosted.org/packages/8f/ef/f1bea01e40b4a879364c031476c82a0dc69ce068daad67ab96302fed2d45/librt-0.8.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9e2c6f77b9ad48ce5603b83b7da9ee3e36b3ab425353f695cba13200c5d96596", size = 213113, upload-time = "2026-02-17T16:11:33.192Z" }, + { url = "https://files.pythonhosted.org/packages/9b/80/cdab544370cc6bc1b72ea369525f547a59e6938ef6863a11ab3cd24759af/librt-0.8.1-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:439352ba9373f11cb8e1933da194dcc6206daf779ff8df0ed69c5e39113e6a99", size = 212269, upload-time = "2026-02-17T16:11:34.373Z" }, + { url = "https://files.pythonhosted.org/packages/9d/9c/48d6ed8dac595654f15eceab2035131c136d1ae9a1e3548e777bb6dbb95d/librt-0.8.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:82210adabbc331dbb65d7868b105185464ef13f56f7f76688565ad79f648b0fe", size = 234673, upload-time = "2026-02-17T16:11:36.063Z" }, + { url = "https://files.pythonhosted.org/packages/16/01/35b68b1db517f27a01be4467593292eb5315def8900afad29fabf56304ba/librt-0.8.1-cp311-cp311-win32.whl", hash = "sha256:52c224e14614b750c0a6d97368e16804a98c684657c7518752c356834fff83bb", size = 54597, upload-time = "2026-02-17T16:11:37.544Z" }, + { url = "https://files.pythonhosted.org/packages/71/02/796fe8f02822235966693f257bf2c79f40e11337337a657a8cfebba5febc/librt-0.8.1-cp311-cp311-win_amd64.whl", hash = "sha256:c00e5c884f528c9932d278d5c9cbbea38a6b81eb62c02e06ae53751a83a4d52b", size = 61733, upload-time = "2026-02-17T16:11:38.691Z" }, + { url = "https://files.pythonhosted.org/packages/28/ad/232e13d61f879a42a4e7117d65e4984bb28371a34bb6fb9ca54ec2c8f54e/librt-0.8.1-cp311-cp311-win_arm64.whl", hash = "sha256:f7cdf7f26c2286ffb02e46d7bac56c94655540b26347673bea15fa52a6af17e9", size = 52273, upload-time = "2026-02-17T16:11:40.308Z" }, + { url = "https://files.pythonhosted.org/packages/95/21/d39b0a87ac52fc98f621fb6f8060efb017a767ebbbac2f99fbcbc9ddc0d7/librt-0.8.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a28f2612ab566b17f3698b0da021ff9960610301607c9a5e8eaca62f5e1c350a", size = 66516, upload-time = "2026-02-17T16:11:41.604Z" }, + { url = "https://files.pythonhosted.org/packages/69/f1/46375e71441c43e8ae335905e069f1c54febee63a146278bcee8782c84fd/librt-0.8.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:60a78b694c9aee2a0f1aaeaa7d101cf713e92e8423a941d2897f4fa37908dab9", size = 68634, upload-time = "2026-02-17T16:11:43.268Z" }, + { url = 
"https://files.pythonhosted.org/packages/0a/33/c510de7f93bf1fa19e13423a606d8189a02624a800710f6e6a0a0f0784b3/librt-0.8.1-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:758509ea3f1eba2a57558e7e98f4659d0ea7670bff49673b0dde18a3c7e6c0eb", size = 198941, upload-time = "2026-02-17T16:11:44.28Z" }, + { url = "https://files.pythonhosted.org/packages/dd/36/e725903416409a533d92398e88ce665476f275081d0d7d42f9c4951999e5/librt-0.8.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:039b9f2c506bd0ab0f8725aa5ba339c6f0cd19d3b514b50d134789809c24285d", size = 209991, upload-time = "2026-02-17T16:11:45.462Z" }, + { url = "https://files.pythonhosted.org/packages/30/7a/8d908a152e1875c9f8eac96c97a480df425e657cdb47854b9efaa4998889/librt-0.8.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5bb54f1205a3a6ab41a6fd71dfcdcbd278670d3a90ca502a30d9da583105b6f7", size = 224476, upload-time = "2026-02-17T16:11:46.542Z" }, + { url = "https://files.pythonhosted.org/packages/a8/b8/a22c34f2c485b8903a06f3fe3315341fe6876ef3599792344669db98fcff/librt-0.8.1-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:05bd41cdee35b0c59c259f870f6da532a2c5ca57db95b5f23689fcb5c9e42440", size = 217518, upload-time = "2026-02-17T16:11:47.746Z" }, + { url = "https://files.pythonhosted.org/packages/79/6f/5c6fea00357e4f82ba44f81dbfb027921f1ab10e320d4a64e1c408d035d9/librt-0.8.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:adfab487facf03f0d0857b8710cf82d0704a309d8ffc33b03d9302b4c64e91a9", size = 225116, upload-time = "2026-02-17T16:11:49.298Z" }, + { url = "https://files.pythonhosted.org/packages/f2/a0/95ced4e7b1267fe1e2720a111685bcddf0e781f7e9e0ce59d751c44dcfe5/librt-0.8.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:153188fe98a72f206042be10a2c6026139852805215ed9539186312d50a8e972", size = 217751, upload-time = "2026-02-17T16:11:50.49Z" }, + { url = "https://files.pythonhosted.org/packages/93/c2/0517281cb4d4101c27ab59472924e67f55e375bc46bedae94ac6dc6e1902/librt-0.8.1-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:dd3c41254ee98604b08bd5b3af5bf0a89740d4ee0711de95b65166bf44091921", size = 218378, upload-time = "2026-02-17T16:11:51.783Z" }, + { url = "https://files.pythonhosted.org/packages/43/e8/37b3ac108e8976888e559a7b227d0ceac03c384cfd3e7a1c2ee248dbae79/librt-0.8.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e0d138c7ae532908cbb342162b2611dbd4d90c941cd25ab82084aaf71d2c0bd0", size = 241199, upload-time = "2026-02-17T16:11:53.561Z" }, + { url = "https://files.pythonhosted.org/packages/4b/5b/35812d041c53967fedf551a39399271bbe4257e681236a2cf1a69c8e7fa1/librt-0.8.1-cp312-cp312-win32.whl", hash = "sha256:43353b943613c5d9c49a25aaffdba46f888ec354e71e3529a00cca3f04d66a7a", size = 54917, upload-time = "2026-02-17T16:11:54.758Z" }, + { url = "https://files.pythonhosted.org/packages/de/d1/fa5d5331b862b9775aaf2a100f5ef86854e5d4407f71bddf102f4421e034/librt-0.8.1-cp312-cp312-win_amd64.whl", hash = "sha256:ff8baf1f8d3f4b6b7257fcb75a501f2a5499d0dda57645baa09d4d0d34b19444", size = 62017, upload-time = "2026-02-17T16:11:55.748Z" }, + { url = "https://files.pythonhosted.org/packages/c7/7c/c614252f9acda59b01a66e2ddfd243ed1c7e1deab0293332dfbccf862808/librt-0.8.1-cp312-cp312-win_arm64.whl", hash = "sha256:0f2ae3725904f7377e11cc37722d5d401e8b3d5851fb9273d7f4fe04f6b3d37d", size = 52441, upload-time = "2026-02-17T16:11:56.801Z" }, + { url = 
"https://files.pythonhosted.org/packages/c5/3c/f614c8e4eaac7cbf2bbdf9528790b21d89e277ee20d57dc6e559c626105f/librt-0.8.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7e6bad1cd94f6764e1e21950542f818a09316645337fd5ab9a7acc45d99a8f35", size = 66529, upload-time = "2026-02-17T16:11:57.809Z" }, + { url = "https://files.pythonhosted.org/packages/ab/96/5836544a45100ae411eda07d29e3d99448e5258b6e9c8059deb92945f5c2/librt-0.8.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cf450f498c30af55551ba4f66b9123b7185362ec8b625a773b3d39aa1a717583", size = 68669, upload-time = "2026-02-17T16:11:58.843Z" }, + { url = "https://files.pythonhosted.org/packages/06/53/f0b992b57af6d5531bf4677d75c44f095f2366a1741fb695ee462ae04b05/librt-0.8.1-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:eca45e982fa074090057132e30585a7e8674e9e885d402eae85633e9f449ce6c", size = 199279, upload-time = "2026-02-17T16:11:59.862Z" }, + { url = "https://files.pythonhosted.org/packages/f3/ad/4848cc16e268d14280d8168aee4f31cea92bbd2b79ce33d3e166f2b4e4fc/librt-0.8.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0c3811485fccfda840861905b8c70bba5ec094e02825598bb9d4ca3936857a04", size = 210288, upload-time = "2026-02-17T16:12:00.954Z" }, + { url = "https://files.pythonhosted.org/packages/52/05/27fdc2e95de26273d83b96742d8d3b7345f2ea2bdbd2405cc504644f2096/librt-0.8.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e4af413908f77294605e28cfd98063f54b2c790561383971d2f52d113d9c363", size = 224809, upload-time = "2026-02-17T16:12:02.108Z" }, + { url = "https://files.pythonhosted.org/packages/7a/d0/78200a45ba3240cb042bc597d6f2accba9193a2c57d0356268cbbe2d0925/librt-0.8.1-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:5212a5bd7fae98dae95710032902edcd2ec4dc994e883294f75c857b83f9aba0", size = 218075, upload-time = "2026-02-17T16:12:03.631Z" }, + { url = "https://files.pythonhosted.org/packages/af/72/a210839fa74c90474897124c064ffca07f8d4b347b6574d309686aae7ca6/librt-0.8.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e692aa2d1d604e6ca12d35e51fdc36f4cda6345e28e36374579f7ef3611b3012", size = 225486, upload-time = "2026-02-17T16:12:04.725Z" }, + { url = "https://files.pythonhosted.org/packages/a3/c1/a03cc63722339ddbf087485f253493e2b013039f5b707e8e6016141130fa/librt-0.8.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4be2a5c926b9770c9e08e717f05737a269b9d0ebc5d2f0060f0fe3fe9ce47acb", size = 218219, upload-time = "2026-02-17T16:12:05.828Z" }, + { url = "https://files.pythonhosted.org/packages/58/f5/fff6108af0acf941c6f274a946aea0e484bd10cd2dc37610287ce49388c5/librt-0.8.1-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:fd1a720332ea335ceb544cf0a03f81df92abd4bb887679fd1e460976b0e6214b", size = 218750, upload-time = "2026-02-17T16:12:07.09Z" }, + { url = "https://files.pythonhosted.org/packages/71/67/5a387bfef30ec1e4b4f30562c8586566faf87e47d696768c19feb49e3646/librt-0.8.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:93c2af9e01e0ef80d95ae3c720be101227edae5f2fe7e3dc63d8857fadfc5a1d", size = 241624, upload-time = "2026-02-17T16:12:08.43Z" }, + { url = "https://files.pythonhosted.org/packages/d4/be/24f8502db11d405232ac1162eb98069ca49c3306c1d75c6ccc61d9af8789/librt-0.8.1-cp313-cp313-win32.whl", hash = "sha256:086a32dbb71336627e78cc1d6ee305a68d038ef7d4c39aaff41ae8c9aa46e91a", size = 54969, upload-time = "2026-02-17T16:12:09.633Z" }, + { url = 
"https://files.pythonhosted.org/packages/5c/73/c9fdf6cb2a529c1a092ce769a12d88c8cca991194dfe641b6af12fa964d2/librt-0.8.1-cp313-cp313-win_amd64.whl", hash = "sha256:e11769a1dbda4da7b00a76cfffa67aa47cfa66921d2724539eee4b9ede780b79", size = 62000, upload-time = "2026-02-17T16:12:10.632Z" }, + { url = "https://files.pythonhosted.org/packages/d3/97/68f80ca3ac4924f250cdfa6e20142a803e5e50fca96ef5148c52ee8c10ea/librt-0.8.1-cp313-cp313-win_arm64.whl", hash = "sha256:924817ab3141aca17893386ee13261f1d100d1ef410d70afe4389f2359fea4f0", size = 52495, upload-time = "2026-02-17T16:12:11.633Z" }, + { url = "https://files.pythonhosted.org/packages/c9/6a/907ef6800f7bca71b525a05f1839b21f708c09043b1c6aa77b6b827b3996/librt-0.8.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:6cfa7fe54fd4d1f47130017351a959fe5804bda7a0bc7e07a2cdbc3fdd28d34f", size = 66081, upload-time = "2026-02-17T16:12:12.766Z" }, + { url = "https://files.pythonhosted.org/packages/1b/18/25e991cd5640c9fb0f8d91b18797b29066b792f17bf8493da183bf5caabe/librt-0.8.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:228c2409c079f8c11fb2e5d7b277077f694cb93443eb760e00b3b83cb8b3176c", size = 68309, upload-time = "2026-02-17T16:12:13.756Z" }, + { url = "https://files.pythonhosted.org/packages/a4/36/46820d03f058cfb5a9de5940640ba03165ed8aded69e0733c417bb04df34/librt-0.8.1-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:7aae78ab5e3206181780e56912d1b9bb9f90a7249ce12f0e8bf531d0462dd0fc", size = 196804, upload-time = "2026-02-17T16:12:14.818Z" }, + { url = "https://files.pythonhosted.org/packages/59/18/5dd0d3b87b8ff9c061849fbdb347758d1f724b9a82241aa908e0ec54ccd0/librt-0.8.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:172d57ec04346b047ca6af181e1ea4858086c80bdf455f61994c4aa6fc3f866c", size = 206907, upload-time = "2026-02-17T16:12:16.513Z" }, + { url = "https://files.pythonhosted.org/packages/d1/96/ef04902aad1424fd7299b62d1890e803e6ab4018c3044dca5922319c4b97/librt-0.8.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6b1977c4ea97ce5eb7755a78fae68d87e4102e4aaf54985e8b56806849cc06a3", size = 221217, upload-time = "2026-02-17T16:12:17.906Z" }, + { url = "https://files.pythonhosted.org/packages/6d/ff/7e01f2dda84a8f5d280637a2e5827210a8acca9a567a54507ef1c75b342d/librt-0.8.1-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:10c42e1f6fd06733ef65ae7bebce2872bcafd8d6e6b0a08fe0a05a23b044fb14", size = 214622, upload-time = "2026-02-17T16:12:19.108Z" }, + { url = "https://files.pythonhosted.org/packages/1e/8c/5b093d08a13946034fed57619742f790faf77058558b14ca36a6e331161e/librt-0.8.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4c8dfa264b9193c4ee19113c985c95f876fae5e51f731494fc4e0cf594990ba7", size = 221987, upload-time = "2026-02-17T16:12:20.331Z" }, + { url = "https://files.pythonhosted.org/packages/d3/cc/86b0b3b151d40920ad45a94ce0171dec1aebba8a9d72bb3fa00c73ab25dd/librt-0.8.1-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:01170b6729a438f0dedc4a26ed342e3dc4f02d1000b4b19f980e1877f0c297e6", size = 215132, upload-time = "2026-02-17T16:12:21.54Z" }, + { url = "https://files.pythonhosted.org/packages/fc/be/8588164a46edf1e69858d952654e216a9a91174688eeefb9efbb38a9c799/librt-0.8.1-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:7b02679a0d783bdae30d443025b94465d8c3dc512f32f5b5031f93f57ac32071", size = 215195, upload-time = "2026-02-17T16:12:23.073Z" }, + { url = 
"https://files.pythonhosted.org/packages/f5/f2/0b9279bea735c734d69344ecfe056c1ba211694a72df10f568745c899c76/librt-0.8.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:190b109bb69592a3401fe1ffdea41a2e73370ace2ffdc4a0e8e2b39cdea81b78", size = 237946, upload-time = "2026-02-17T16:12:24.275Z" }, + { url = "https://files.pythonhosted.org/packages/e9/cc/5f2a34fbc8aeb35314a3641f9956fa9051a947424652fad9882be7a97949/librt-0.8.1-cp314-cp314-win32.whl", hash = "sha256:e70a57ecf89a0f64c24e37f38d3fe217a58169d2fe6ed6d70554964042474023", size = 50689, upload-time = "2026-02-17T16:12:25.766Z" }, + { url = "https://files.pythonhosted.org/packages/a0/76/cd4d010ab2147339ca2b93e959c3686e964edc6de66ddacc935c325883d7/librt-0.8.1-cp314-cp314-win_amd64.whl", hash = "sha256:7e2f3edca35664499fbb36e4770650c4bd4a08abc1f4458eab9df4ec56389730", size = 57875, upload-time = "2026-02-17T16:12:27.465Z" }, + { url = "https://files.pythonhosted.org/packages/84/0f/2143cb3c3ca48bd3379dcd11817163ca50781927c4537345d608b5045998/librt-0.8.1-cp314-cp314-win_arm64.whl", hash = "sha256:0d2f82168e55ddefd27c01c654ce52379c0750ddc31ee86b4b266bcf4d65f2a3", size = 48058, upload-time = "2026-02-17T16:12:28.556Z" }, + { url = "https://files.pythonhosted.org/packages/d2/0e/9b23a87e37baf00311c3efe6b48d6b6c168c29902dfc3f04c338372fd7db/librt-0.8.1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:2c74a2da57a094bd48d03fa5d196da83d2815678385d2978657499063709abe1", size = 68313, upload-time = "2026-02-17T16:12:29.659Z" }, + { url = "https://files.pythonhosted.org/packages/db/9a/859c41e5a4f1c84200a7d2b92f586aa27133c8243b6cac9926f6e54d01b9/librt-0.8.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a355d99c4c0d8e5b770313b8b247411ed40949ca44e33e46a4789b9293a907ee", size = 70994, upload-time = "2026-02-17T16:12:31.516Z" }, + { url = "https://files.pythonhosted.org/packages/4c/28/10605366ee599ed34223ac2bf66404c6fb59399f47108215d16d5ad751a8/librt-0.8.1-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:2eb345e8b33fb748227409c9f1233d4df354d6e54091f0e8fc53acdb2ffedeb7", size = 220770, upload-time = "2026-02-17T16:12:33.294Z" }, + { url = "https://files.pythonhosted.org/packages/af/8d/16ed8fd452dafae9c48d17a6bc1ee3e818fd40ef718d149a8eff2c9f4ea2/librt-0.8.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9be2f15e53ce4e83cc08adc29b26fb5978db62ef2a366fbdf716c8a6c8901040", size = 235409, upload-time = "2026-02-17T16:12:35.443Z" }, + { url = "https://files.pythonhosted.org/packages/89/1b/7bdf3e49349c134b25db816e4a3db6b94a47ac69d7d46b1e682c2c4949be/librt-0.8.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:785ae29c1f5c6e7c2cde2c7c0e148147f4503da3abc5d44d482068da5322fd9e", size = 246473, upload-time = "2026-02-17T16:12:36.656Z" }, + { url = "https://files.pythonhosted.org/packages/4e/8a/91fab8e4fd2a24930a17188c7af5380eb27b203d72101c9cc000dbdfd95a/librt-0.8.1-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:1d3a7da44baf692f0c6aeb5b2a09c5e6fc7a703bca9ffa337ddd2e2da53f7732", size = 238866, upload-time = "2026-02-17T16:12:37.849Z" }, + { url = "https://files.pythonhosted.org/packages/b9/e0/c45a098843fc7c07e18a7f8a24ca8496aecbf7bdcd54980c6ca1aaa79a8e/librt-0.8.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5fc48998000cbc39ec0d5311312dda93ecf92b39aaf184c5e817d5d440b29624", size = 250248, upload-time = "2026-02-17T16:12:39.445Z" }, + { url = 
"https://files.pythonhosted.org/packages/82/30/07627de23036640c952cce0c1fe78972e77d7d2f8fd54fa5ef4554ff4a56/librt-0.8.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:e96baa6820280077a78244b2e06e416480ed859bbd8e5d641cf5742919d8beb4", size = 240629, upload-time = "2026-02-17T16:12:40.889Z" }, + { url = "https://files.pythonhosted.org/packages/fb/c1/55bfe1ee3542eba055616f9098eaf6eddb966efb0ca0f44eaa4aba327307/librt-0.8.1-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:31362dbfe297b23590530007062c32c6f6176f6099646bb2c95ab1b00a57c382", size = 239615, upload-time = "2026-02-17T16:12:42.446Z" }, + { url = "https://files.pythonhosted.org/packages/2b/39/191d3d28abc26c9099b19852e6c99f7f6d400b82fa5a4e80291bd3803e19/librt-0.8.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cc3656283d11540ab0ea01978378e73e10002145117055e03722417aeab30994", size = 263001, upload-time = "2026-02-17T16:12:43.627Z" }, + { url = "https://files.pythonhosted.org/packages/b9/eb/7697f60fbe7042ab4e88f4ee6af496b7f222fffb0a4e3593ef1f29f81652/librt-0.8.1-cp314-cp314t-win32.whl", hash = "sha256:738f08021b3142c2918c03692608baed43bc51144c29e35807682f8070ee2a3a", size = 51328, upload-time = "2026-02-17T16:12:45.148Z" }, + { url = "https://files.pythonhosted.org/packages/7c/72/34bf2eb7a15414a23e5e70ecb9440c1d3179f393d9349338a91e2781c0fb/librt-0.8.1-cp314-cp314t-win_amd64.whl", hash = "sha256:89815a22daf9c51884fb5dbe4f1ef65ee6a146e0b6a8df05f753e2e4a9359bf4", size = 58722, upload-time = "2026-02-17T16:12:46.85Z" }, + { url = "https://files.pythonhosted.org/packages/b2/c8/d148e041732d631fc76036f8b30fae4e77b027a1e95b7a84bb522481a940/librt-0.8.1-cp314-cp314t-win_arm64.whl", hash = "sha256:bf512a71a23504ed08103a13c941f763db13fb11177beb3d9244c98c29fb4a61", size = 48755, upload-time = "2026-02-17T16:12:47.943Z" }, ] [[package]] @@ -1954,11 +1999,11 @@ wheels = [ [[package]] name = "markdown" -version = "3.10.1" +version = "3.10.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/b7/b1/af95bcae8549f1f3fd70faacb29075826a0d689a27f232e8cee315efa053/markdown-3.10.1.tar.gz", hash = "sha256:1c19c10bd5c14ac948c53d0d762a04e2fa35a6d58a6b7b1e6bfcbe6fefc0001a", size = 365402, upload-time = "2026-01-21T18:09:28.206Z" } +sdist = { url = "https://files.pythonhosted.org/packages/2b/f4/69fa6ed85ae003c2378ffa8f6d2e3234662abd02c10d216c0ba96081a238/markdown-3.10.2.tar.gz", hash = "sha256:994d51325d25ad8aa7ce4ebaec003febcce822c3f8c911e3b17c52f7f589f950", size = 368805, upload-time = "2026-02-09T14:57:26.942Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/59/1b/6ef961f543593969d25b2afe57a3564200280528caa9bd1082eecdd7b3bc/markdown-3.10.1-py3-none-any.whl", hash = "sha256:867d788939fe33e4b736426f5b9f651ad0c0ae0ecf89df0ca5d1176c70812fe3", size = 107684, upload-time = "2026-01-21T18:09:27.203Z" }, + { url = "https://files.pythonhosted.org/packages/de/1f/77fa3081e4f66ca3576c896ae5d31c3002ac6607f9747d2e3aa49227e464/markdown-3.10.2-py3-none-any.whl", hash = "sha256:e91464b71ae3ee7afd3017d9f358ef0baf158fd9a298db92f1d4761133824c36", size = 108180, upload-time = "2026-02-09T14:57:25.787Z" }, ] [[package]] @@ -2115,7 +2160,7 @@ wheels = [ [[package]] name = "mistralai" -version = "1.12.0" +version = "1.12.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "eval-type-backport" }, @@ -2124,15 +2169,14 @@ dependencies = [ { name = "opentelemetry-api" }, { name = "opentelemetry-exporter-otlp-proto-http" }, { name = "opentelemetry-sdk" }, - { name = 
"opentelemetry-semantic-conventions" }, { name = "pydantic" }, { name = "python-dateutil" }, { name = "pyyaml" }, { name = "typing-inspection" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/35/7c/4c2404f49a9b0a3dcf7ce9e05b4749cf3d53f203e5724b430fa07bfebb48/mistralai-1.12.0.tar.gz", hash = "sha256:38149a598eab7ed4b876c92e6e1c89d31fe01935bbf19314549fd32b21a1135f", size = 242013, upload-time = "2026-02-04T14:50:04.166Z" } +sdist = { url = "https://files.pythonhosted.org/packages/46/ad/3d3b17a768f641ab428bbe7c4a75283db029737778d4f56cb4a9145ba54f/mistralai-1.12.3.tar.gz", hash = "sha256:d59a788e82c16fd7d340f9f2e722ed0897fe15ccce797278b836d65fa671ef6e", size = 242961, upload-time = "2026-02-17T15:41:29Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/1a/c5/f188efddb2b6debf73f2b2fea70eb119d1935ee33a92947d0756e22476ba/mistralai-1.12.0-py3-none-any.whl", hash = "sha256:a5873d456b7920782f716d60593d4db32c9510b13fc98908081b9f36427e0e5a", size = 500154, upload-time = "2026-02-04T14:50:05.628Z" }, + { url = "https://files.pythonhosted.org/packages/b5/3c/d17250578195b90b0a7f2500c4d587996c9b79470dbcfe7fb9d24feb9d1b/mistralai-1.12.3-py3-none-any.whl", hash = "sha256:e164e070011dd7759ad5d969c44359939d7d73f7fec787667317b7e81ffc5a8b", size = 502976, upload-time = "2026-02-17T15:41:30.648Z" }, ] [[package]] @@ -2231,7 +2275,7 @@ wheels = [ [[package]] name = "moto" -version = "5.1.20" +version = "5.1.21" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "boto3" }, @@ -2244,9 +2288,9 @@ dependencies = [ { name = "werkzeug" }, { name = "xmltodict" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b4/93/6b696aab5174721696a17716a488086e21f7b2547b4c9517f799a9b25e9e/moto-5.1.20.tar.gz", hash = "sha256:6d12d781e26a550d80e4b7e01d5538178e3adec6efbdec870e06e84750f13ec0", size = 8318716, upload-time = "2026-01-17T21:49:00.101Z" } +sdist = { url = "https://files.pythonhosted.org/packages/55/f8/81e2ee90f47a6ae1e475a961bd6a1a1569b04999ba941897b87101b0d5af/moto-5.1.21.tar.gz", hash = "sha256:713dde46e71e2714fa9a29eec513ec618d35e1d84c256331b5aab3f30692feeb", size = 8441171, upload-time = "2026-02-08T21:52:39.157Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7f/2f/f50892fdb28097917b87d358a5fcefd30976289884ff142893edcb0243ba/moto-5.1.20-py3-none-any.whl", hash = "sha256:58c82c8e6b2ef659ef3a562fa415dce14da84bc7a797943245d9a338496ea0ea", size = 6392751, upload-time = "2026-01-17T21:48:57.099Z" }, + { url = "https://files.pythonhosted.org/packages/53/c7/4b0bc06f0811caa67f7e8c3ca2e637bd8cb4317c2f8839b7d643d7ace68c/moto-5.1.21-py3-none-any.whl", hash = "sha256:311a30095b08b39dd2707f161f1440d361684fe0090b9fd0751dfd1c9b022445", size = 6514163, upload-time = "2026-02-08T21:52:36.91Z" }, ] [package.optional-dependencies] @@ -2344,6 +2388,20 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/81/f2/08ace4142eb281c12701fc3b93a10795e4d4dc7f753911d836675050f886/msgpack-1.1.2-cp314-cp314t-win_arm64.whl", hash = "sha256:d99ef64f349d5ec3293688e91486c5fdb925ed03807f64d98d205d2713c60b46", size = 70868, upload-time = "2025-10-08T09:15:44.959Z" }, ] +[[package]] +name = "mthds" +version = "0.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "backports-strenum", marker = "python_full_version < '3.11'" }, + { name = "httpx" }, + { name = "pydantic" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/65/03/440ef52bb67159e3f3f7efc08879a917cbb7039f0037a008fd7f1c7c40cd/mthds-0.0.1.tar.gz", hash = 
"sha256:e84f82eae611443ff9e4fca6a13c91ef17daacf5bd9a664023286b6b39ee42dd", size = 60075, upload-time = "2026-02-17T17:50:40.016Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3a/7c/45cec5ffbe5051cb2d72f94fbc2aefaaa3158bdc1c849809c9ffe1e6057f/mthds-0.0.1-py3-none-any.whl", hash = "sha256:3550ce58c84b943bbe09d7f5f2ff454728dce8b823253322ecf682c638ff4a3c", size = 11823, upload-time = "2026-02-17T17:50:38.252Z" }, +] + [[package]] name = "multidict" version = "6.7.1" @@ -2916,7 +2974,7 @@ wheels = [ [[package]] name = "openai" -version = "2.17.0" +version = "2.21.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -2928,9 +2986,9 @@ dependencies = [ { name = "tqdm" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/9c/a2/677f22c4b487effb8a09439fb6134034b5f0a39ca27df8b95fac23a93720/openai-2.17.0.tar.gz", hash = "sha256:47224b74bd20f30c6b0a6a329505243cb2f26d5cf84d9f8d0825ff8b35e9c999", size = 631445, upload-time = "2026-02-05T16:27:40.953Z" } +sdist = { url = "https://files.pythonhosted.org/packages/92/e5/3d197a0947a166649f566706d7a4c8f7fe38f1fa7b24c9bcffe4c7591d44/openai-2.21.0.tar.gz", hash = "sha256:81b48ce4b8bbb2cc3af02047ceb19561f7b1dc0d4e52d1de7f02abfd15aa59b7", size = 644374, upload-time = "2026-02-14T00:12:01.577Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/44/97/284535aa75e6e84ab388248b5a323fc296b1f70530130dee37f7f4fbe856/openai-2.17.0-py3-none-any.whl", hash = "sha256:4f393fd886ca35e113aac7ff239bcd578b81d8f104f5aedc7d3693eb2af1d338", size = 1069524, upload-time = "2026-02-05T16:27:38.941Z" }, + { url = "https://files.pythonhosted.org/packages/cc/56/0a89092a453bb2c676d66abee44f863e742b2110d4dbb1dbcca3f7e5fc33/openai-2.21.0-py3-none-any.whl", hash = "sha256:0bc1c775e5b1536c294eded39ee08f8407656537ccc71b1004104fe1602e267c", size = 1103065, upload-time = "2026-02-14T00:11:59.603Z" }, ] [[package]] @@ -2966,32 +3024,32 @@ wheels = [ [[package]] name = "opentelemetry-api" -version = "1.38.0" +version = "1.39.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "importlib-metadata" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/08/d8/0f354c375628e048bd0570645b310797299754730079853095bf000fba69/opentelemetry_api-1.38.0.tar.gz", hash = "sha256:f4c193b5e8acb0912b06ac5b16321908dd0843d75049c091487322284a3eea12", size = 65242, upload-time = "2025-10-16T08:35:50.25Z" } +sdist = { url = "https://files.pythonhosted.org/packages/97/b9/3161be15bb8e3ad01be8be5a968a9237c3027c5be504362ff800fca3e442/opentelemetry_api-1.39.1.tar.gz", hash = "sha256:fbde8c80e1b937a2c61f20347e91c0c18a1940cecf012d62e65a7caf08967c9c", size = 65767, upload-time = "2025-12-11T13:32:39.182Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ae/a2/d86e01c28300bd41bab8f18afd613676e2bd63515417b77636fc1add426f/opentelemetry_api-1.38.0-py3-none-any.whl", hash = "sha256:2891b0197f47124454ab9f0cf58f3be33faca394457ac3e09daba13ff50aa582", size = 65947, upload-time = "2025-10-16T08:35:30.23Z" }, + { url = "https://files.pythonhosted.org/packages/cf/df/d3f1ddf4bb4cb50ed9b1139cc7b1c54c34a1e7ce8fd1b9a37c0d1551a6bd/opentelemetry_api-1.39.1-py3-none-any.whl", hash = "sha256:2edd8463432a7f8443edce90972169b195e7d6a05500cd29e6d13898187c9950", size = 66356, upload-time = "2025-12-11T13:32:17.304Z" }, ] [[package]] name = "opentelemetry-exporter-otlp-proto-common" -version = "1.38.0" +version = "1.39.1" source = { registry = 
"https://pypi.org/simple" } dependencies = [ { name = "opentelemetry-proto" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/19/83/dd4660f2956ff88ed071e9e0e36e830df14b8c5dc06722dbde1841accbe8/opentelemetry_exporter_otlp_proto_common-1.38.0.tar.gz", hash = "sha256:e333278afab4695aa8114eeb7bf4e44e65c6607d54968271a249c180b2cb605c", size = 20431, upload-time = "2025-10-16T08:35:53.285Z" } +sdist = { url = "https://files.pythonhosted.org/packages/e9/9d/22d241b66f7bbde88a3bfa6847a351d2c46b84de23e71222c6aae25c7050/opentelemetry_exporter_otlp_proto_common-1.39.1.tar.gz", hash = "sha256:763370d4737a59741c89a67b50f9e39271639ee4afc999dadfe768541c027464", size = 20409, upload-time = "2025-12-11T13:32:40.885Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a7/9e/55a41c9601191e8cd8eb626b54ee6827b9c9d4a46d736f32abc80d8039fc/opentelemetry_exporter_otlp_proto_common-1.38.0-py3-none-any.whl", hash = "sha256:03cb76ab213300fe4f4c62b7d8f17d97fcfd21b89f0b5ce38ea156327ddda74a", size = 18359, upload-time = "2025-10-16T08:35:34.099Z" }, + { url = "https://files.pythonhosted.org/packages/8c/02/ffc3e143d89a27ac21fd557365b98bd0653b98de8a101151d5805b5d4c33/opentelemetry_exporter_otlp_proto_common-1.39.1-py3-none-any.whl", hash = "sha256:08f8a5862d64cc3435105686d0216c1365dc5701f86844a8cd56597d0c764fde", size = 18366, upload-time = "2025-12-11T13:32:20.2Z" }, ] [[package]] name = "opentelemetry-exporter-otlp-proto-http" -version = "1.38.0" +version = "1.39.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "googleapis-common-protos" }, @@ -3002,48 +3060,48 @@ dependencies = [ { name = "requests" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/81/0a/debcdfb029fbd1ccd1563f7c287b89a6f7bef3b2902ade56797bfd020854/opentelemetry_exporter_otlp_proto_http-1.38.0.tar.gz", hash = "sha256:f16bd44baf15cbe07633c5112ffc68229d0edbeac7b37610be0b2def4e21e90b", size = 17282, upload-time = "2025-10-16T08:35:54.422Z" } +sdist = { url = "https://files.pythonhosted.org/packages/80/04/2a08fa9c0214ae38880df01e8bfae12b067ec0793446578575e5080d6545/opentelemetry_exporter_otlp_proto_http-1.39.1.tar.gz", hash = "sha256:31bdab9745c709ce90a49a0624c2bd445d31a28ba34275951a6a362d16a0b9cb", size = 17288, upload-time = "2025-12-11T13:32:42.029Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e5/77/154004c99fb9f291f74aa0822a2f5bbf565a72d8126b3a1b63ed8e5f83c7/opentelemetry_exporter_otlp_proto_http-1.38.0-py3-none-any.whl", hash = "sha256:84b937305edfc563f08ec69b9cb2298be8188371217e867c1854d77198d0825b", size = 19579, upload-time = "2025-10-16T08:35:36.269Z" }, + { url = "https://files.pythonhosted.org/packages/95/f1/b27d3e2e003cd9a3592c43d099d2ed8d0a947c15281bf8463a256db0b46c/opentelemetry_exporter_otlp_proto_http-1.39.1-py3-none-any.whl", hash = "sha256:d9f5207183dd752a412c4cd564ca8875ececba13be6e9c6c370ffb752fd59985", size = 19641, upload-time = "2025-12-11T13:32:22.248Z" }, ] [[package]] name = "opentelemetry-proto" -version = "1.38.0" +version = "1.39.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "protobuf" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/51/14/f0c4f0f6371b9cb7f9fa9ee8918bfd59ac7040c7791f1e6da32a1839780d/opentelemetry_proto-1.38.0.tar.gz", hash = "sha256:88b161e89d9d372ce723da289b7da74c3a8354a8e5359992be813942969ed468", size = 46152, upload-time = "2025-10-16T08:36:01.612Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/49/1d/f25d76d8260c156c40c97c9ed4511ec0f9ce353f8108ca6e7561f82a06b2/opentelemetry_proto-1.39.1.tar.gz", hash = "sha256:6c8e05144fc0d3ed4d22c2289c6b126e03bcd0e6a7da0f16cedd2e1c2772e2c8", size = 46152, upload-time = "2025-12-11T13:32:48.681Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b6/6a/82b68b14efca5150b2632f3692d627afa76b77378c4999f2648979409528/opentelemetry_proto-1.38.0-py3-none-any.whl", hash = "sha256:b6ebe54d3217c42e45462e2a1ae28c3e2bf2ec5a5645236a490f55f45f1a0a18", size = 72535, upload-time = "2025-10-16T08:35:45.749Z" }, + { url = "https://files.pythonhosted.org/packages/51/95/b40c96a7b5203005a0b03d8ce8cd212ff23f1793d5ba289c87a097571b18/opentelemetry_proto-1.39.1-py3-none-any.whl", hash = "sha256:22cdc78efd3b3765d09e68bfbd010d4fc254c9818afd0b6b423387d9dee46007", size = 72535, upload-time = "2025-12-11T13:32:33.866Z" }, ] [[package]] name = "opentelemetry-sdk" -version = "1.38.0" +version = "1.39.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "opentelemetry-api" }, { name = "opentelemetry-semantic-conventions" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/85/cb/f0eee1445161faf4c9af3ba7b848cc22a50a3d3e2515051ad8628c35ff80/opentelemetry_sdk-1.38.0.tar.gz", hash = "sha256:93df5d4d871ed09cb4272305be4d996236eedb232253e3ab864c8620f051cebe", size = 171942, upload-time = "2025-10-16T08:36:02.257Z" } +sdist = { url = "https://files.pythonhosted.org/packages/eb/fb/c76080c9ba07e1e8235d24cdcc4d125ef7aa3edf23eb4e497c2e50889adc/opentelemetry_sdk-1.39.1.tar.gz", hash = "sha256:cf4d4563caf7bff906c9f7967e2be22d0d6b349b908be0d90fb21c8e9c995cc6", size = 171460, upload-time = "2025-12-11T13:32:49.369Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/2f/2e/e93777a95d7d9c40d270a371392b6d6f1ff170c2a3cb32d6176741b5b723/opentelemetry_sdk-1.38.0-py3-none-any.whl", hash = "sha256:1c66af6564ecc1553d72d811a01df063ff097cdc82ce188da9951f93b8d10f6b", size = 132349, upload-time = "2025-10-16T08:35:46.995Z" }, + { url = "https://files.pythonhosted.org/packages/7c/98/e91cf858f203d86f4eccdf763dcf01cf03f1dae80c3750f7e635bfa206b6/opentelemetry_sdk-1.39.1-py3-none-any.whl", hash = "sha256:4d5482c478513ecb0a5d938dcc61394e647066e0cc2676bee9f3af3f3f45f01c", size = 132565, upload-time = "2025-12-11T13:32:35.069Z" }, ] [[package]] name = "opentelemetry-semantic-conventions" -version = "0.59b0" +version = "0.60b1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "opentelemetry-api" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/40/bc/8b9ad3802cd8ac6583a4eb7de7e5d7db004e89cb7efe7008f9c8a537ee75/opentelemetry_semantic_conventions-0.59b0.tar.gz", hash = "sha256:7a6db3f30d70202d5bf9fa4b69bc866ca6a30437287de6c510fb594878aed6b0", size = 129861, upload-time = "2025-10-16T08:36:03.346Z" } +sdist = { url = "https://files.pythonhosted.org/packages/91/df/553f93ed38bf22f4b999d9be9c185adb558982214f33eae539d3b5cd0858/opentelemetry_semantic_conventions-0.60b1.tar.gz", hash = "sha256:87c228b5a0669b748c76d76df6c364c369c28f1c465e50f661e39737e84bc953", size = 137935, upload-time = "2025-12-11T13:32:50.487Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/24/7d/c88d7b15ba8fe5c6b8f93be50fc11795e9fc05386c44afaf6b76fe191f9b/opentelemetry_semantic_conventions-0.59b0-py3-none-any.whl", hash = "sha256:35d3b8833ef97d614136e253c1da9342b4c3c083bbaf29ce31d572a1c3825eed", size = 207954, upload-time = 
"2025-10-16T08:35:48.054Z" }, + { url = "https://files.pythonhosted.org/packages/7a/5e/5958555e09635d09b75de3c4f8b9cae7335ca545d77392ffe7331534c402/opentelemetry_semantic_conventions-0.60b1-py3-none-any.whl", hash = "sha256:9fa8c8b0c110da289809292b0591220d3a7b53c1526a23021e977d68597893fb", size = 219982, upload-time = "2025-12-11T13:32:36.955Z" }, ] [[package]] @@ -3137,104 +3195,100 @@ wheels = [ [[package]] name = "pillow" -version = "11.3.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f3/0d/d0d6dea55cd152ce3d6767bb38a8fc10e33796ba4ba210cbab9354b6d238/pillow-11.3.0.tar.gz", hash = "sha256:3828ee7586cd0b2091b6209e5ad53e20d0649bbe87164a459d0676e035e8f523", size = 47113069, upload-time = "2025-07-01T09:16:30.666Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/4c/5d/45a3553a253ac8763f3561371432a90bdbe6000fbdcf1397ffe502aa206c/pillow-11.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1b9c17fd4ace828b3003dfd1e30bff24863e0eb59b535e8f80194d9cc7ecf860", size = 5316554, upload-time = "2025-07-01T09:13:39.342Z" }, - { url = "https://files.pythonhosted.org/packages/7c/c8/67c12ab069ef586a25a4a79ced553586748fad100c77c0ce59bb4983ac98/pillow-11.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:65dc69160114cdd0ca0f35cb434633c75e8e7fad4cf855177a05bf38678f73ad", size = 4686548, upload-time = "2025-07-01T09:13:41.835Z" }, - { url = "https://files.pythonhosted.org/packages/2f/bd/6741ebd56263390b382ae4c5de02979af7f8bd9807346d068700dd6d5cf9/pillow-11.3.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7107195ddc914f656c7fc8e4a5e1c25f32e9236ea3ea860f257b0436011fddd0", size = 5859742, upload-time = "2025-07-03T13:09:47.439Z" }, - { url = "https://files.pythonhosted.org/packages/ca/0b/c412a9e27e1e6a829e6ab6c2dca52dd563efbedf4c9c6aa453d9a9b77359/pillow-11.3.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cc3e831b563b3114baac7ec2ee86819eb03caa1a2cef0b481a5675b59c4fe23b", size = 7633087, upload-time = "2025-07-03T13:09:51.796Z" }, - { url = "https://files.pythonhosted.org/packages/59/9d/9b7076aaf30f5dd17e5e5589b2d2f5a5d7e30ff67a171eb686e4eecc2adf/pillow-11.3.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f1f182ebd2303acf8c380a54f615ec883322593320a9b00438eb842c1f37ae50", size = 5963350, upload-time = "2025-07-01T09:13:43.865Z" }, - { url = "https://files.pythonhosted.org/packages/f0/16/1a6bf01fb622fb9cf5c91683823f073f053005c849b1f52ed613afcf8dae/pillow-11.3.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4445fa62e15936a028672fd48c4c11a66d641d2c05726c7ec1f8ba6a572036ae", size = 6631840, upload-time = "2025-07-01T09:13:46.161Z" }, - { url = "https://files.pythonhosted.org/packages/7b/e6/6ff7077077eb47fde78739e7d570bdcd7c10495666b6afcd23ab56b19a43/pillow-11.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:71f511f6b3b91dd543282477be45a033e4845a40278fa8dcdbfdb07109bf18f9", size = 6074005, upload-time = "2025-07-01T09:13:47.829Z" }, - { url = "https://files.pythonhosted.org/packages/c3/3a/b13f36832ea6d279a697231658199e0a03cd87ef12048016bdcc84131601/pillow-11.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:040a5b691b0713e1f6cbe222e0f4f74cd233421e105850ae3b3c0ceda520f42e", size = 6708372, upload-time = "2025-07-01T09:13:52.145Z" }, - { url = "https://files.pythonhosted.org/packages/6c/e4/61b2e1a7528740efbc70b3d581f33937e38e98ef3d50b05007267a55bcb2/pillow-11.3.0-cp310-cp310-win32.whl", 
hash = "sha256:89bd777bc6624fe4115e9fac3352c79ed60f3bb18651420635f26e643e3dd1f6", size = 6277090, upload-time = "2025-07-01T09:13:53.915Z" }, - { url = "https://files.pythonhosted.org/packages/a9/d3/60c781c83a785d6afbd6a326ed4d759d141de43aa7365725cbcd65ce5e54/pillow-11.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:19d2ff547c75b8e3ff46f4d9ef969a06c30ab2d4263a9e287733aa8b2429ce8f", size = 6985988, upload-time = "2025-07-01T09:13:55.699Z" }, - { url = "https://files.pythonhosted.org/packages/9f/28/4f4a0203165eefb3763939c6789ba31013a2e90adffb456610f30f613850/pillow-11.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:819931d25e57b513242859ce1876c58c59dc31587847bf74cfe06b2e0cb22d2f", size = 2422899, upload-time = "2025-07-01T09:13:57.497Z" }, - { url = "https://files.pythonhosted.org/packages/db/26/77f8ed17ca4ffd60e1dcd220a6ec6d71210ba398cfa33a13a1cd614c5613/pillow-11.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1cd110edf822773368b396281a2293aeb91c90a2db00d78ea43e7e861631b722", size = 5316531, upload-time = "2025-07-01T09:13:59.203Z" }, - { url = "https://files.pythonhosted.org/packages/cb/39/ee475903197ce709322a17a866892efb560f57900d9af2e55f86db51b0a5/pillow-11.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9c412fddd1b77a75aa904615ebaa6001f169b26fd467b4be93aded278266b288", size = 4686560, upload-time = "2025-07-01T09:14:01.101Z" }, - { url = "https://files.pythonhosted.org/packages/d5/90/442068a160fd179938ba55ec8c97050a612426fae5ec0a764e345839f76d/pillow-11.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1aa4de119a0ecac0a34a9c8bde33f34022e2e8f99104e47a3ca392fd60e37d", size = 5870978, upload-time = "2025-07-03T13:09:55.638Z" }, - { url = "https://files.pythonhosted.org/packages/13/92/dcdd147ab02daf405387f0218dcf792dc6dd5b14d2573d40b4caeef01059/pillow-11.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:91da1d88226663594e3f6b4b8c3c8d85bd504117d043740a8e0ec449087cc494", size = 7641168, upload-time = "2025-07-03T13:10:00.37Z" }, - { url = "https://files.pythonhosted.org/packages/6e/db/839d6ba7fd38b51af641aa904e2960e7a5644d60ec754c046b7d2aee00e5/pillow-11.3.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:643f189248837533073c405ec2f0bb250ba54598cf80e8c1e043381a60632f58", size = 5973053, upload-time = "2025-07-01T09:14:04.491Z" }, - { url = "https://files.pythonhosted.org/packages/f2/2f/d7675ecae6c43e9f12aa8d58b6012683b20b6edfbdac7abcb4e6af7a3784/pillow-11.3.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:106064daa23a745510dabce1d84f29137a37224831d88eb4ce94bb187b1d7e5f", size = 6640273, upload-time = "2025-07-01T09:14:06.235Z" }, - { url = "https://files.pythonhosted.org/packages/45/ad/931694675ede172e15b2ff03c8144a0ddaea1d87adb72bb07655eaffb654/pillow-11.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cd8ff254faf15591e724dc7c4ddb6bf4793efcbe13802a4ae3e863cd300b493e", size = 6082043, upload-time = "2025-07-01T09:14:07.978Z" }, - { url = "https://files.pythonhosted.org/packages/3a/04/ba8f2b11fc80d2dd462d7abec16351b45ec99cbbaea4387648a44190351a/pillow-11.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:932c754c2d51ad2b2271fd01c3d121daaa35e27efae2a616f77bf164bc0b3e94", size = 6715516, upload-time = "2025-07-01T09:14:10.233Z" }, - { url = "https://files.pythonhosted.org/packages/48/59/8cd06d7f3944cc7d892e8533c56b0acb68399f640786313275faec1e3b6f/pillow-11.3.0-cp311-cp311-win32.whl", hash = 
"sha256:b4b8f3efc8d530a1544e5962bd6b403d5f7fe8b9e08227c6b255f98ad82b4ba0", size = 6274768, upload-time = "2025-07-01T09:14:11.921Z" }, - { url = "https://files.pythonhosted.org/packages/f1/cc/29c0f5d64ab8eae20f3232da8f8571660aa0ab4b8f1331da5c2f5f9a938e/pillow-11.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:1a992e86b0dd7aeb1f053cd506508c0999d710a8f07b4c791c63843fc6a807ac", size = 6986055, upload-time = "2025-07-01T09:14:13.623Z" }, - { url = "https://files.pythonhosted.org/packages/c6/df/90bd886fabd544c25addd63e5ca6932c86f2b701d5da6c7839387a076b4a/pillow-11.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:30807c931ff7c095620fe04448e2c2fc673fcbb1ffe2a7da3fb39613489b1ddd", size = 2423079, upload-time = "2025-07-01T09:14:15.268Z" }, - { url = "https://files.pythonhosted.org/packages/40/fe/1bc9b3ee13f68487a99ac9529968035cca2f0a51ec36892060edcc51d06a/pillow-11.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdae223722da47b024b867c1ea0be64e0df702c5e0a60e27daad39bf960dd1e4", size = 5278800, upload-time = "2025-07-01T09:14:17.648Z" }, - { url = "https://files.pythonhosted.org/packages/2c/32/7e2ac19b5713657384cec55f89065fb306b06af008cfd87e572035b27119/pillow-11.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:921bd305b10e82b4d1f5e802b6850677f965d8394203d182f078873851dada69", size = 4686296, upload-time = "2025-07-01T09:14:19.828Z" }, - { url = "https://files.pythonhosted.org/packages/8e/1e/b9e12bbe6e4c2220effebc09ea0923a07a6da1e1f1bfbc8d7d29a01ce32b/pillow-11.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:eb76541cba2f958032d79d143b98a3a6b3ea87f0959bbe256c0b5e416599fd5d", size = 5871726, upload-time = "2025-07-03T13:10:04.448Z" }, - { url = "https://files.pythonhosted.org/packages/8d/33/e9200d2bd7ba00dc3ddb78df1198a6e80d7669cce6c2bdbeb2530a74ec58/pillow-11.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:67172f2944ebba3d4a7b54f2e95c786a3a50c21b88456329314caaa28cda70f6", size = 7644652, upload-time = "2025-07-03T13:10:10.391Z" }, - { url = "https://files.pythonhosted.org/packages/41/f1/6f2427a26fc683e00d985bc391bdd76d8dd4e92fac33d841127eb8fb2313/pillow-11.3.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97f07ed9f56a3b9b5f49d3661dc9607484e85c67e27f3e8be2c7d28ca032fec7", size = 5977787, upload-time = "2025-07-01T09:14:21.63Z" }, - { url = "https://files.pythonhosted.org/packages/e4/c9/06dd4a38974e24f932ff5f98ea3c546ce3f8c995d3f0985f8e5ba48bba19/pillow-11.3.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:676b2815362456b5b3216b4fd5bd89d362100dc6f4945154ff172e206a22c024", size = 6645236, upload-time = "2025-07-01T09:14:23.321Z" }, - { url = "https://files.pythonhosted.org/packages/40/e7/848f69fb79843b3d91241bad658e9c14f39a32f71a301bcd1d139416d1be/pillow-11.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3e184b2f26ff146363dd07bde8b711833d7b0202e27d13540bfe2e35a323a809", size = 6086950, upload-time = "2025-07-01T09:14:25.237Z" }, - { url = "https://files.pythonhosted.org/packages/0b/1a/7cff92e695a2a29ac1958c2a0fe4c0b2393b60aac13b04a4fe2735cad52d/pillow-11.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6be31e3fc9a621e071bc17bb7de63b85cbe0bfae91bb0363c893cbe67247780d", size = 6723358, upload-time = "2025-07-01T09:14:27.053Z" }, - { url = "https://files.pythonhosted.org/packages/26/7d/73699ad77895f69edff76b0f332acc3d497f22f5d75e5360f78cbcaff248/pillow-11.3.0-cp312-cp312-win32.whl", hash = 
"sha256:7b161756381f0918e05e7cb8a371fff367e807770f8fe92ecb20d905d0e1c149", size = 6275079, upload-time = "2025-07-01T09:14:30.104Z" }, - { url = "https://files.pythonhosted.org/packages/8c/ce/e7dfc873bdd9828f3b6e5c2bbb74e47a98ec23cc5c74fc4e54462f0d9204/pillow-11.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:a6444696fce635783440b7f7a9fc24b3ad10a9ea3f0ab66c5905be1c19ccf17d", size = 6986324, upload-time = "2025-07-01T09:14:31.899Z" }, - { url = "https://files.pythonhosted.org/packages/16/8f/b13447d1bf0b1f7467ce7d86f6e6edf66c0ad7cf44cf5c87a37f9bed9936/pillow-11.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:2aceea54f957dd4448264f9bf40875da0415c83eb85f55069d89c0ed436e3542", size = 2423067, upload-time = "2025-07-01T09:14:33.709Z" }, - { url = "https://files.pythonhosted.org/packages/1e/93/0952f2ed8db3a5a4c7a11f91965d6184ebc8cd7cbb7941a260d5f018cd2d/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:1c627742b539bba4309df89171356fcb3cc5a9178355b2727d1b74a6cf155fbd", size = 2128328, upload-time = "2025-07-01T09:14:35.276Z" }, - { url = "https://files.pythonhosted.org/packages/4b/e8/100c3d114b1a0bf4042f27e0f87d2f25e857e838034e98ca98fe7b8c0a9c/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:30b7c02f3899d10f13d7a48163c8969e4e653f8b43416d23d13d1bbfdc93b9f8", size = 2170652, upload-time = "2025-07-01T09:14:37.203Z" }, - { url = "https://files.pythonhosted.org/packages/aa/86/3f758a28a6e381758545f7cdb4942e1cb79abd271bea932998fc0db93cb6/pillow-11.3.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:7859a4cc7c9295f5838015d8cc0a9c215b77e43d07a25e460f35cf516df8626f", size = 2227443, upload-time = "2025-07-01T09:14:39.344Z" }, - { url = "https://files.pythonhosted.org/packages/01/f4/91d5b3ffa718df2f53b0dc109877993e511f4fd055d7e9508682e8aba092/pillow-11.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ec1ee50470b0d050984394423d96325b744d55c701a439d2bd66089bff963d3c", size = 5278474, upload-time = "2025-07-01T09:14:41.843Z" }, - { url = "https://files.pythonhosted.org/packages/f9/0e/37d7d3eca6c879fbd9dba21268427dffda1ab00d4eb05b32923d4fbe3b12/pillow-11.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7db51d222548ccfd274e4572fdbf3e810a5e66b00608862f947b163e613b67dd", size = 4686038, upload-time = "2025-07-01T09:14:44.008Z" }, - { url = "https://files.pythonhosted.org/packages/ff/b0/3426e5c7f6565e752d81221af9d3676fdbb4f352317ceafd42899aaf5d8a/pillow-11.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2d6fcc902a24ac74495df63faad1884282239265c6839a0a6416d33faedfae7e", size = 5864407, upload-time = "2025-07-03T13:10:15.628Z" }, - { url = "https://files.pythonhosted.org/packages/fc/c1/c6c423134229f2a221ee53f838d4be9d82bab86f7e2f8e75e47b6bf6cd77/pillow-11.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f0f5d8f4a08090c6d6d578351a2b91acf519a54986c055af27e7a93feae6d3f1", size = 7639094, upload-time = "2025-07-03T13:10:21.857Z" }, - { url = "https://files.pythonhosted.org/packages/ba/c9/09e6746630fe6372c67c648ff9deae52a2bc20897d51fa293571977ceb5d/pillow-11.3.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c37d8ba9411d6003bba9e518db0db0c58a680ab9fe5179f040b0463644bc9805", size = 5973503, upload-time = "2025-07-01T09:14:45.698Z" }, - { url = "https://files.pythonhosted.org/packages/d5/1c/a2a29649c0b1983d3ef57ee87a66487fdeb45132df66ab30dd37f7dbe162/pillow-11.3.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:13f87d581e71d9189ab21fe0efb5a23e9f28552d5be6979e84001d3b8505abe8", size = 6642574, upload-time = "2025-07-01T09:14:47.415Z" }, - { url = "https://files.pythonhosted.org/packages/36/de/d5cc31cc4b055b6c6fd990e3e7f0f8aaf36229a2698501bcb0cdf67c7146/pillow-11.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2", size = 6084060, upload-time = "2025-07-01T09:14:49.636Z" }, - { url = "https://files.pythonhosted.org/packages/d5/ea/502d938cbaeec836ac28a9b730193716f0114c41325db428e6b280513f09/pillow-11.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:45dfc51ac5975b938e9809451c51734124e73b04d0f0ac621649821a63852e7b", size = 6721407, upload-time = "2025-07-01T09:14:51.962Z" }, - { url = "https://files.pythonhosted.org/packages/45/9c/9c5e2a73f125f6cbc59cc7087c8f2d649a7ae453f83bd0362ff7c9e2aee2/pillow-11.3.0-cp313-cp313-win32.whl", hash = "sha256:a4d336baed65d50d37b88ca5b60c0fa9d81e3a87d4a7930d3880d1624d5b31f3", size = 6273841, upload-time = "2025-07-01T09:14:54.142Z" }, - { url = "https://files.pythonhosted.org/packages/23/85/397c73524e0cd212067e0c969aa245b01d50183439550d24d9f55781b776/pillow-11.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0bce5c4fd0921f99d2e858dc4d4d64193407e1b99478bc5cacecba2311abde51", size = 6978450, upload-time = "2025-07-01T09:14:56.436Z" }, - { url = "https://files.pythonhosted.org/packages/17/d2/622f4547f69cd173955194b78e4d19ca4935a1b0f03a302d655c9f6aae65/pillow-11.3.0-cp313-cp313-win_arm64.whl", hash = "sha256:1904e1264881f682f02b7f8167935cce37bc97db457f8e7849dc3a6a52b99580", size = 2423055, upload-time = "2025-07-01T09:14:58.072Z" }, - { url = "https://files.pythonhosted.org/packages/dd/80/a8a2ac21dda2e82480852978416cfacd439a4b490a501a288ecf4fe2532d/pillow-11.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4c834a3921375c48ee6b9624061076bc0a32a60b5532b322cc0ea64e639dd50e", size = 5281110, upload-time = "2025-07-01T09:14:59.79Z" }, - { url = "https://files.pythonhosted.org/packages/44/d6/b79754ca790f315918732e18f82a8146d33bcd7f4494380457ea89eb883d/pillow-11.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5e05688ccef30ea69b9317a9ead994b93975104a677a36a8ed8106be9260aa6d", size = 4689547, upload-time = "2025-07-01T09:15:01.648Z" }, - { url = "https://files.pythonhosted.org/packages/49/20/716b8717d331150cb00f7fdd78169c01e8e0c219732a78b0e59b6bdb2fd6/pillow-11.3.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1019b04af07fc0163e2810167918cb5add8d74674b6267616021ab558dc98ced", size = 5901554, upload-time = "2025-07-03T13:10:27.018Z" }, - { url = "https://files.pythonhosted.org/packages/74/cf/a9f3a2514a65bb071075063a96f0a5cf949c2f2fce683c15ccc83b1c1cab/pillow-11.3.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f944255db153ebb2b19c51fe85dd99ef0ce494123f21b9db4877ffdfc5590c7c", size = 7669132, upload-time = "2025-07-03T13:10:33.01Z" }, - { url = "https://files.pythonhosted.org/packages/98/3c/da78805cbdbee9cb43efe8261dd7cc0b4b93f2ac79b676c03159e9db2187/pillow-11.3.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f85acb69adf2aaee8b7da124efebbdb959a104db34d3a2cb0f3793dbae422a8", size = 6005001, upload-time = "2025-07-01T09:15:03.365Z" }, - { url = "https://files.pythonhosted.org/packages/6c/fa/ce044b91faecf30e635321351bba32bab5a7e034c60187fe9698191aef4f/pillow-11.3.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:05f6ecbeff5005399bb48d198f098a9b4b6bdf27b8487c7f38ca16eeb070cd59", size = 6668814, upload-time = "2025-07-01T09:15:05.655Z" }, - { url = "https://files.pythonhosted.org/packages/7b/51/90f9291406d09bf93686434f9183aba27b831c10c87746ff49f127ee80cb/pillow-11.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a7bc6e6fd0395bc052f16b1a8670859964dbd7003bd0af2ff08342eb6e442cfe", size = 6113124, upload-time = "2025-07-01T09:15:07.358Z" }, - { url = "https://files.pythonhosted.org/packages/cd/5a/6fec59b1dfb619234f7636d4157d11fb4e196caeee220232a8d2ec48488d/pillow-11.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:83e1b0161c9d148125083a35c1c5a89db5b7054834fd4387499e06552035236c", size = 6747186, upload-time = "2025-07-01T09:15:09.317Z" }, - { url = "https://files.pythonhosted.org/packages/49/6b/00187a044f98255225f172de653941e61da37104a9ea60e4f6887717e2b5/pillow-11.3.0-cp313-cp313t-win32.whl", hash = "sha256:2a3117c06b8fb646639dce83694f2f9eac405472713fcb1ae887469c0d4f6788", size = 6277546, upload-time = "2025-07-01T09:15:11.311Z" }, - { url = "https://files.pythonhosted.org/packages/e8/5c/6caaba7e261c0d75bab23be79f1d06b5ad2a2ae49f028ccec801b0e853d6/pillow-11.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:857844335c95bea93fb39e0fa2726b4d9d758850b34075a7e3ff4f4fa3aa3b31", size = 6985102, upload-time = "2025-07-01T09:15:13.164Z" }, - { url = "https://files.pythonhosted.org/packages/f3/7e/b623008460c09a0cb38263c93b828c666493caee2eb34ff67f778b87e58c/pillow-11.3.0-cp313-cp313t-win_arm64.whl", hash = "sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e", size = 2424803, upload-time = "2025-07-01T09:15:15.695Z" }, - { url = "https://files.pythonhosted.org/packages/73/f4/04905af42837292ed86cb1b1dabe03dce1edc008ef14c473c5c7e1443c5d/pillow-11.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d9da3df5f9ea2a89b81bb6087177fb1f4d1c7146d583a3fe5c672c0d94e55e12", size = 5278520, upload-time = "2025-07-01T09:15:17.429Z" }, - { url = "https://files.pythonhosted.org/packages/41/b0/33d79e377a336247df6348a54e6d2a2b85d644ca202555e3faa0cf811ecc/pillow-11.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0b275ff9b04df7b640c59ec5a3cb113eefd3795a8df80bac69646ef699c6981a", size = 4686116, upload-time = "2025-07-01T09:15:19.423Z" }, - { url = "https://files.pythonhosted.org/packages/49/2d/ed8bc0ab219ae8768f529597d9509d184fe8a6c4741a6864fea334d25f3f/pillow-11.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0743841cabd3dba6a83f38a92672cccbd69af56e3e91777b0ee7f4dba4385632", size = 5864597, upload-time = "2025-07-03T13:10:38.404Z" }, - { url = "https://files.pythonhosted.org/packages/b5/3d/b932bb4225c80b58dfadaca9d42d08d0b7064d2d1791b6a237f87f661834/pillow-11.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2465a69cf967b8b49ee1b96d76718cd98c4e925414ead59fdf75cf0fd07df673", size = 7638246, upload-time = "2025-07-03T13:10:44.987Z" }, - { url = "https://files.pythonhosted.org/packages/09/b5/0487044b7c096f1b48f0d7ad416472c02e0e4bf6919541b111efd3cae690/pillow-11.3.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41742638139424703b4d01665b807c6468e23e699e8e90cffefe291c5832b027", size = 5973336, upload-time = "2025-07-01T09:15:21.237Z" }, - { url = "https://files.pythonhosted.org/packages/a8/2d/524f9318f6cbfcc79fbc004801ea6b607ec3f843977652fdee4857a7568b/pillow-11.3.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:93efb0b4de7e340d99057415c749175e24c8864302369e05914682ba642e5d77", size = 6642699, upload-time = "2025-07-01T09:15:23.186Z" }, - { url = "https://files.pythonhosted.org/packages/6f/d2/a9a4f280c6aefedce1e8f615baaa5474e0701d86dd6f1dede66726462bbd/pillow-11.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7966e38dcd0fa11ca390aed7c6f20454443581d758242023cf36fcb319b1a874", size = 6083789, upload-time = "2025-07-01T09:15:25.1Z" }, - { url = "https://files.pythonhosted.org/packages/fe/54/86b0cd9dbb683a9d5e960b66c7379e821a19be4ac5810e2e5a715c09a0c0/pillow-11.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:98a9afa7b9007c67ed84c57c9e0ad86a6000da96eaa638e4f8abe5b65ff83f0a", size = 6720386, upload-time = "2025-07-01T09:15:27.378Z" }, - { url = "https://files.pythonhosted.org/packages/e7/95/88efcaf384c3588e24259c4203b909cbe3e3c2d887af9e938c2022c9dd48/pillow-11.3.0-cp314-cp314-win32.whl", hash = "sha256:02a723e6bf909e7cea0dac1b0e0310be9d7650cd66222a5f1c571455c0a45214", size = 6370911, upload-time = "2025-07-01T09:15:29.294Z" }, - { url = "https://files.pythonhosted.org/packages/2e/cc/934e5820850ec5eb107e7b1a72dd278140731c669f396110ebc326f2a503/pillow-11.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:a418486160228f64dd9e9efcd132679b7a02a5f22c982c78b6fc7dab3fefb635", size = 7117383, upload-time = "2025-07-01T09:15:31.128Z" }, - { url = "https://files.pythonhosted.org/packages/d6/e9/9c0a616a71da2a5d163aa37405e8aced9a906d574b4a214bede134e731bc/pillow-11.3.0-cp314-cp314-win_arm64.whl", hash = "sha256:155658efb5e044669c08896c0c44231c5e9abcaadbc5cd3648df2f7c0b96b9a6", size = 2511385, upload-time = "2025-07-01T09:15:33.328Z" }, - { url = "https://files.pythonhosted.org/packages/1a/33/c88376898aff369658b225262cd4f2659b13e8178e7534df9e6e1fa289f6/pillow-11.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:59a03cdf019efbfeeed910bf79c7c93255c3d54bc45898ac2a4140071b02b4ae", size = 5281129, upload-time = "2025-07-01T09:15:35.194Z" }, - { url = "https://files.pythonhosted.org/packages/1f/70/d376247fb36f1844b42910911c83a02d5544ebd2a8bad9efcc0f707ea774/pillow-11.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f8a5827f84d973d8636e9dc5764af4f0cf2318d26744b3d902931701b0d46653", size = 4689580, upload-time = "2025-07-01T09:15:37.114Z" }, - { url = "https://files.pythonhosted.org/packages/eb/1c/537e930496149fbac69efd2fc4329035bbe2e5475b4165439e3be9cb183b/pillow-11.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ee92f2fd10f4adc4b43d07ec5e779932b4eb3dbfbc34790ada5a6669bc095aa6", size = 5902860, upload-time = "2025-07-03T13:10:50.248Z" }, - { url = "https://files.pythonhosted.org/packages/bd/57/80f53264954dcefeebcf9dae6e3eb1daea1b488f0be8b8fef12f79a3eb10/pillow-11.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c96d333dcf42d01f47b37e0979b6bd73ec91eae18614864622d9b87bbd5bbf36", size = 7670694, upload-time = "2025-07-03T13:10:56.432Z" }, - { url = "https://files.pythonhosted.org/packages/70/ff/4727d3b71a8578b4587d9c276e90efad2d6fe0335fd76742a6da08132e8c/pillow-11.3.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c96f993ab8c98460cd0c001447bff6194403e8b1d7e149ade5f00594918128b", size = 6005888, upload-time = "2025-07-01T09:15:39.436Z" }, - { url = "https://files.pythonhosted.org/packages/05/ae/716592277934f85d3be51d7256f3636672d7b1abfafdc42cf3f8cbd4b4c8/pillow-11.3.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:41342b64afeba938edb034d122b2dda5db2139b9a4af999729ba8818e0056477", size = 6670330, upload-time = "2025-07-01T09:15:41.269Z" }, - { url = "https://files.pythonhosted.org/packages/e7/bb/7fe6cddcc8827b01b1a9766f5fdeb7418680744f9082035bdbabecf1d57f/pillow-11.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:068d9c39a2d1b358eb9f245ce7ab1b5c3246c7c8c7d9ba58cfa5b43146c06e50", size = 6114089, upload-time = "2025-07-01T09:15:43.13Z" }, - { url = "https://files.pythonhosted.org/packages/8b/f5/06bfaa444c8e80f1a8e4bff98da9c83b37b5be3b1deaa43d27a0db37ef84/pillow-11.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a1bc6ba083b145187f648b667e05a2534ecc4b9f2784c2cbe3089e44868f2b9b", size = 6748206, upload-time = "2025-07-01T09:15:44.937Z" }, - { url = "https://files.pythonhosted.org/packages/f0/77/bc6f92a3e8e6e46c0ca78abfffec0037845800ea38c73483760362804c41/pillow-11.3.0-cp314-cp314t-win32.whl", hash = "sha256:118ca10c0d60b06d006be10a501fd6bbdfef559251ed31b794668ed569c87e12", size = 6377370, upload-time = "2025-07-01T09:15:46.673Z" }, - { url = "https://files.pythonhosted.org/packages/4a/82/3a721f7d69dca802befb8af08b7c79ebcab461007ce1c18bd91a5d5896f9/pillow-11.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:8924748b688aa210d79883357d102cd64690e56b923a186f35a82cbc10f997db", size = 7121500, upload-time = "2025-07-01T09:15:48.512Z" }, - { url = "https://files.pythonhosted.org/packages/89/c7/5572fa4a3f45740eaab6ae86fcdf7195b55beac1371ac8c619d880cfe948/pillow-11.3.0-cp314-cp314t-win_arm64.whl", hash = "sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa", size = 2512835, upload-time = "2025-07-01T09:15:50.399Z" }, - { url = "https://files.pythonhosted.org/packages/6f/8b/209bd6b62ce8367f47e68a218bffac88888fdf2c9fcf1ecadc6c3ec1ebc7/pillow-11.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3cee80663f29e3843b68199b9d6f4f54bd1d4a6b59bdd91bceefc51238bcb967", size = 5270556, upload-time = "2025-07-01T09:16:09.961Z" }, - { url = "https://files.pythonhosted.org/packages/2e/e6/231a0b76070c2cfd9e260a7a5b504fb72da0a95279410fa7afd99d9751d6/pillow-11.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b5f56c3f344f2ccaf0dd875d3e180f631dc60a51b314295a3e681fe8cf851fbe", size = 4654625, upload-time = "2025-07-01T09:16:11.913Z" }, - { url = "https://files.pythonhosted.org/packages/13/f4/10cf94fda33cb12765f2397fc285fa6d8eb9c29de7f3185165b702fc7386/pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e67d793d180c9df62f1f40aee3accca4829d3794c95098887edc18af4b8b780c", size = 4874207, upload-time = "2025-07-03T13:11:10.201Z" }, - { url = "https://files.pythonhosted.org/packages/72/c9/583821097dc691880c92892e8e2d41fe0a5a3d6021f4963371d2f6d57250/pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d000f46e2917c705e9fb93a3606ee4a819d1e3aa7a9b442f6444f07e77cf5e25", size = 6583939, upload-time = "2025-07-03T13:11:15.68Z" }, - { url = "https://files.pythonhosted.org/packages/3b/8e/5c9d410f9217b12320efc7c413e72693f48468979a013ad17fd690397b9a/pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:527b37216b6ac3a12d7838dc3bd75208ec57c1c6d11ef01902266a5a0c14fc27", size = 4957166, upload-time = "2025-07-01T09:16:13.74Z" }, - { url = "https://files.pythonhosted.org/packages/62/bb/78347dbe13219991877ffb3a91bf09da8317fbfcd4b5f9140aeae020ad71/pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:be5463ac478b623b9dd3937afd7fb7ab3d79dd290a28e2b6df292dc75063eb8a", size = 5581482, upload-time = "2025-07-01T09:16:16.107Z" }, - { url = "https://files.pythonhosted.org/packages/d9/28/1000353d5e61498aaeaaf7f1e4b49ddb05f2c6575f9d4f9f914a3538b6e1/pillow-11.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:8dc70ca24c110503e16918a658b869019126ecfe03109b754c402daff12b3d9f", size = 6984596, upload-time = "2025-07-01T09:16:18.07Z" }, - { url = "https://files.pythonhosted.org/packages/9e/e3/6fa84033758276fb31da12e5fb66ad747ae83b93c67af17f8c6ff4cc8f34/pillow-11.3.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7c8ec7a017ad1bd562f93dbd8505763e688d388cde6e4a010ae1486916e713e6", size = 5270566, upload-time = "2025-07-01T09:16:19.801Z" }, - { url = "https://files.pythonhosted.org/packages/5b/ee/e8d2e1ab4892970b561e1ba96cbd59c0d28cf66737fc44abb2aec3795a4e/pillow-11.3.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:9ab6ae226de48019caa8074894544af5b53a117ccb9d3b3dcb2871464c829438", size = 4654618, upload-time = "2025-07-01T09:16:21.818Z" }, - { url = "https://files.pythonhosted.org/packages/f2/6d/17f80f4e1f0761f02160fc433abd4109fa1548dcfdca46cfdadaf9efa565/pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fe27fb049cdcca11f11a7bfda64043c37b30e6b91f10cb5bab275806c32f6ab3", size = 4874248, upload-time = "2025-07-03T13:11:20.738Z" }, - { url = "https://files.pythonhosted.org/packages/de/5f/c22340acd61cef960130585bbe2120e2fd8434c214802f07e8c03596b17e/pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:465b9e8844e3c3519a983d58b80be3f668e2a7a5db97f2784e7079fbc9f9822c", size = 6583963, upload-time = "2025-07-03T13:11:26.283Z" }, - { url = "https://files.pythonhosted.org/packages/31/5e/03966aedfbfcbb4d5f8aa042452d3361f325b963ebbadddac05b122e47dd/pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5418b53c0d59b3824d05e029669efa023bbef0f3e92e75ec8428f3799487f361", size = 4957170, upload-time = "2025-07-01T09:16:23.762Z" }, - { url = "https://files.pythonhosted.org/packages/cc/2d/e082982aacc927fc2cab48e1e731bdb1643a1406acace8bed0900a61464e/pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:504b6f59505f08ae014f724b6207ff6222662aab5cc9542577fb084ed0676ac7", size = 5581505, upload-time = "2025-07-01T09:16:25.593Z" }, - { url = "https://files.pythonhosted.org/packages/34/e7/ae39f538fd6844e982063c3a5e4598b8ced43b9633baa3a85ef33af8c05c/pillow-11.3.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c84d689db21a1c397d001aa08241044aa2069e7587b398c8cc63020390b1c1b8", size = 6984598, upload-time = "2025-07-01T09:16:27.732Z" }, +version = "12.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1f/42/5c74462b4fd957fcd7b13b04fb3205ff8349236ea74c7c375766d6c82288/pillow-12.1.1.tar.gz", hash = "sha256:9ad8fa5937ab05218e2b6a4cff30295ad35afd2f83ac592e68c0d871bb0fdbc4", size = 46980264, upload-time = "2026-02-11T04:23:07.146Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1d/30/5bd3d794762481f8c8ae9c80e7b76ecea73b916959eb587521358ef0b2f9/pillow-12.1.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1f1625b72740fdda5d77b4def688eb8fd6490975d06b909fd19f13f391e077e0", size = 5304099, upload-time = "2026-02-11T04:20:06.13Z" }, + { url = 
"https://files.pythonhosted.org/packages/bd/c1/aab9e8f3eeb4490180e357955e15c2ef74b31f64790ff356c06fb6cf6d84/pillow-12.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:178aa072084bd88ec759052feca8e56cbb14a60b39322b99a049e58090479713", size = 4657880, upload-time = "2026-02-11T04:20:09.291Z" }, + { url = "https://files.pythonhosted.org/packages/f1/0a/9879e30d56815ad529d3985aeff5af4964202425c27261a6ada10f7cbf53/pillow-12.1.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b66e95d05ba806247aaa1561f080abc7975daf715c30780ff92a20e4ec546e1b", size = 6222587, upload-time = "2026-02-11T04:20:10.82Z" }, + { url = "https://files.pythonhosted.org/packages/5a/5f/a1b72ff7139e4f89014e8d451442c74a774d5c43cd938fb0a9f878576b37/pillow-12.1.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:89c7e895002bbe49cdc5426150377cbbc04767d7547ed145473f496dfa40408b", size = 8027678, upload-time = "2026-02-11T04:20:12.455Z" }, + { url = "https://files.pythonhosted.org/packages/e2/c2/c7cb187dac79a3d22c3ebeae727abee01e077c8c7d930791dc592f335153/pillow-12.1.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a5cbdcddad0af3da87cb16b60d23648bc3b51967eb07223e9fed77a82b457c4", size = 6335777, upload-time = "2026-02-11T04:20:14.441Z" }, + { url = "https://files.pythonhosted.org/packages/0c/7b/f9b09a7804ec7336effb96c26d37c29d27225783dc1501b7d62dcef6ae25/pillow-12.1.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9f51079765661884a486727f0729d29054242f74b46186026582b4e4769918e4", size = 7027140, upload-time = "2026-02-11T04:20:16.387Z" }, + { url = "https://files.pythonhosted.org/packages/98/b2/2fa3c391550bd421b10849d1a2144c44abcd966daadd2f7c12e19ea988c4/pillow-12.1.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:99c1506ea77c11531d75e3a412832a13a71c7ebc8192ab9e4b2e355555920e3e", size = 6449855, upload-time = "2026-02-11T04:20:18.554Z" }, + { url = "https://files.pythonhosted.org/packages/96/ff/9caf4b5b950c669263c39e96c78c0d74a342c71c4f43fd031bb5cb7ceac9/pillow-12.1.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:36341d06738a9f66c8287cf8b876d24b18db9bd8740fa0672c74e259ad408cff", size = 7151329, upload-time = "2026-02-11T04:20:20.646Z" }, + { url = "https://files.pythonhosted.org/packages/7b/f8/4b24841f582704da675ca535935bccb32b00a6da1226820845fac4a71136/pillow-12.1.1-cp310-cp310-win32.whl", hash = "sha256:6c52f062424c523d6c4db85518774cc3d50f5539dd6eed32b8f6229b26f24d40", size = 6325574, upload-time = "2026-02-11T04:20:22.43Z" }, + { url = "https://files.pythonhosted.org/packages/f8/f9/9f6b01c0881d7036063aa6612ef04c0e2cad96be21325a1e92d0203f8e91/pillow-12.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:c6008de247150668a705a6338156efb92334113421ceecf7438a12c9a12dab23", size = 7032347, upload-time = "2026-02-11T04:20:23.932Z" }, + { url = "https://files.pythonhosted.org/packages/79/13/c7922edded3dcdaf10c59297540b72785620abc0538872c819915746757d/pillow-12.1.1-cp310-cp310-win_arm64.whl", hash = "sha256:1a9b0ee305220b392e1124a764ee4265bd063e54a751a6b62eff69992f457fa9", size = 2453457, upload-time = "2026-02-11T04:20:25.392Z" }, + { url = "https://files.pythonhosted.org/packages/2b/46/5da1ec4a5171ee7bf1a0efa064aba70ba3d6e0788ce3f5acd1375d23c8c0/pillow-12.1.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:e879bb6cd5c73848ef3b2b48b8af9ff08c5b71ecda8048b7dd22d8a33f60be32", size = 5304084, upload-time = "2026-02-11T04:20:27.501Z" }, + { url = 
"https://files.pythonhosted.org/packages/78/93/a29e9bc02d1cf557a834da780ceccd54e02421627200696fcf805ebdc3fb/pillow-12.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:365b10bb9417dd4498c0e3b128018c4a624dc11c7b97d8cc54effe3b096f4c38", size = 4657866, upload-time = "2026-02-11T04:20:29.827Z" }, + { url = "https://files.pythonhosted.org/packages/13/84/583a4558d492a179d31e4aae32eadce94b9acf49c0337c4ce0b70e0a01f2/pillow-12.1.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d4ce8e329c93845720cd2014659ca67eac35f6433fd3050393d85f3ecef0dad5", size = 6232148, upload-time = "2026-02-11T04:20:31.329Z" }, + { url = "https://files.pythonhosted.org/packages/d5/e2/53c43334bbbb2d3b938978532fbda8e62bb6e0b23a26ce8592f36bcc4987/pillow-12.1.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc354a04072b765eccf2204f588a7a532c9511e8b9c7f900e1b64e3e33487090", size = 8038007, upload-time = "2026-02-11T04:20:34.225Z" }, + { url = "https://files.pythonhosted.org/packages/b8/a6/3d0e79c8a9d58150dd98e199d7c1c56861027f3829a3a60b3c2784190180/pillow-12.1.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7e7976bf1910a8116b523b9f9f58bf410f3e8aa330cd9a2bb2953f9266ab49af", size = 6345418, upload-time = "2026-02-11T04:20:35.858Z" }, + { url = "https://files.pythonhosted.org/packages/a2/c8/46dfeac5825e600579157eea177be43e2f7ff4a99da9d0d0a49533509ac5/pillow-12.1.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:597bd9c8419bc7c6af5604e55847789b69123bbe25d65cc6ad3012b4f3c98d8b", size = 7034590, upload-time = "2026-02-11T04:20:37.91Z" }, + { url = "https://files.pythonhosted.org/packages/af/bf/e6f65d3db8a8bbfeaf9e13cc0417813f6319863a73de934f14b2229ada18/pillow-12.1.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2c1fc0f2ca5f96a3c8407e41cca26a16e46b21060fe6d5b099d2cb01412222f5", size = 6458655, upload-time = "2026-02-11T04:20:39.496Z" }, + { url = "https://files.pythonhosted.org/packages/f9/c2/66091f3f34a25894ca129362e510b956ef26f8fb67a0e6417bc5744e56f1/pillow-12.1.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:578510d88c6229d735855e1f278aa305270438d36a05031dfaae5067cc8eb04d", size = 7159286, upload-time = "2026-02-11T04:20:41.139Z" }, + { url = "https://files.pythonhosted.org/packages/7b/5a/24bc8eb526a22f957d0cec6243146744966d40857e3d8deb68f7902ca6c1/pillow-12.1.1-cp311-cp311-win32.whl", hash = "sha256:7311c0a0dcadb89b36b7025dfd8326ecfa36964e29913074d47382706e516a7c", size = 6328663, upload-time = "2026-02-11T04:20:43.184Z" }, + { url = "https://files.pythonhosted.org/packages/31/03/bef822e4f2d8f9d7448c133d0a18185d3cce3e70472774fffefe8b0ed562/pillow-12.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:fbfa2a7c10cc2623f412753cddf391c7f971c52ca40a3f65dc5039b2939e8563", size = 7031448, upload-time = "2026-02-11T04:20:44.696Z" }, + { url = "https://files.pythonhosted.org/packages/49/70/f76296f53610bd17b2e7d31728b8b7825e3ac3b5b3688b51f52eab7c0818/pillow-12.1.1-cp311-cp311-win_arm64.whl", hash = "sha256:b81b5e3511211631b3f672a595e3221252c90af017e399056d0faabb9538aa80", size = 2453651, upload-time = "2026-02-11T04:20:46.243Z" }, + { url = "https://files.pythonhosted.org/packages/07/d3/8df65da0d4df36b094351dce696f2989bec731d4f10e743b1c5f4da4d3bf/pillow-12.1.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ab323b787d6e18b3d91a72fc99b1a2c28651e4358749842b8f8dfacd28ef2052", size = 5262803, upload-time = "2026-02-11T04:20:47.653Z" }, + { url = 
"https://files.pythonhosted.org/packages/d6/71/5026395b290ff404b836e636f51d7297e6c83beceaa87c592718747e670f/pillow-12.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:adebb5bee0f0af4909c30db0d890c773d1a92ffe83da908e2e9e720f8edf3984", size = 4657601, upload-time = "2026-02-11T04:20:49.328Z" }, + { url = "https://files.pythonhosted.org/packages/b1/2e/1001613d941c67442f745aff0f7cc66dd8df9a9c084eb497e6a543ee6f7e/pillow-12.1.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bb66b7cc26f50977108790e2456b7921e773f23db5630261102233eb355a3b79", size = 6234995, upload-time = "2026-02-11T04:20:51.032Z" }, + { url = "https://files.pythonhosted.org/packages/07/26/246ab11455b2549b9233dbd44d358d033a2f780fa9007b61a913c5b2d24e/pillow-12.1.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:aee2810642b2898bb187ced9b349e95d2a7272930796e022efaf12e99dccd293", size = 8045012, upload-time = "2026-02-11T04:20:52.882Z" }, + { url = "https://files.pythonhosted.org/packages/b2/8b/07587069c27be7535ac1fe33874e32de118fbd34e2a73b7f83436a88368c/pillow-12.1.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a0b1cd6232e2b618adcc54d9882e4e662a089d5768cd188f7c245b4c8c44a397", size = 6349638, upload-time = "2026-02-11T04:20:54.444Z" }, + { url = "https://files.pythonhosted.org/packages/ff/79/6df7b2ee763d619cda2fb4fea498e5f79d984dae304d45a8999b80d6cf5c/pillow-12.1.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7aac39bcf8d4770d089588a2e1dd111cbaa42df5a94be3114222057d68336bd0", size = 7041540, upload-time = "2026-02-11T04:20:55.97Z" }, + { url = "https://files.pythonhosted.org/packages/2c/5e/2ba19e7e7236d7529f4d873bdaf317a318896bac289abebd4bb00ef247f0/pillow-12.1.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ab174cd7d29a62dd139c44bf74b698039328f45cb03b4596c43473a46656b2f3", size = 6462613, upload-time = "2026-02-11T04:20:57.542Z" }, + { url = "https://files.pythonhosted.org/packages/03/03/31216ec124bb5c3dacd74ce8efff4cc7f52643653bad4825f8f08c697743/pillow-12.1.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:339ffdcb7cbeaa08221cd401d517d4b1fe7a9ed5d400e4a8039719238620ca35", size = 7166745, upload-time = "2026-02-11T04:20:59.196Z" }, + { url = "https://files.pythonhosted.org/packages/1f/e7/7c4552d80052337eb28653b617eafdef39adfb137c49dd7e831b8dc13bc5/pillow-12.1.1-cp312-cp312-win32.whl", hash = "sha256:5d1f9575a12bed9e9eedd9a4972834b08c97a352bd17955ccdebfeca5913fa0a", size = 6328823, upload-time = "2026-02-11T04:21:01.385Z" }, + { url = "https://files.pythonhosted.org/packages/3d/17/688626d192d7261bbbf98846fc98995726bddc2c945344b65bec3a29d731/pillow-12.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:21329ec8c96c6e979cd0dfd29406c40c1d52521a90544463057d2aaa937d66a6", size = 7033367, upload-time = "2026-02-11T04:21:03.536Z" }, + { url = "https://files.pythonhosted.org/packages/ed/fe/a0ef1f73f939b0eca03ee2c108d0043a87468664770612602c63266a43c4/pillow-12.1.1-cp312-cp312-win_arm64.whl", hash = "sha256:af9a332e572978f0218686636610555ae3defd1633597be015ed50289a03c523", size = 2453811, upload-time = "2026-02-11T04:21:05.116Z" }, + { url = "https://files.pythonhosted.org/packages/d5/11/6db24d4bd7685583caeae54b7009584e38da3c3d4488ed4cd25b439de486/pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:d242e8ac078781f1de88bf823d70c1a9b3c7950a44cdf4b7c012e22ccbcd8e4e", size = 4062689, upload-time = "2026-02-11T04:21:06.804Z" }, + { url = 
"https://files.pythonhosted.org/packages/33/c0/ce6d3b1fe190f0021203e0d9b5b99e57843e345f15f9ef22fcd43842fd21/pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:02f84dfad02693676692746df05b89cf25597560db2857363a208e393429f5e9", size = 4138535, upload-time = "2026-02-11T04:21:08.452Z" }, + { url = "https://files.pythonhosted.org/packages/a0/c6/d5eb6a4fb32a3f9c21a8c7613ec706534ea1cf9f4b3663e99f0d83f6fca8/pillow-12.1.1-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:e65498daf4b583091ccbb2556c7000abf0f3349fcd57ef7adc9a84a394ed29f6", size = 3601364, upload-time = "2026-02-11T04:21:10.194Z" }, + { url = "https://files.pythonhosted.org/packages/14/a1/16c4b823838ba4c9c52c0e6bbda903a3fe5a1bdbf1b8eb4fff7156f3e318/pillow-12.1.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6c6db3b84c87d48d0088943bf33440e0c42370b99b1c2a7989216f7b42eede60", size = 5262561, upload-time = "2026-02-11T04:21:11.742Z" }, + { url = "https://files.pythonhosted.org/packages/bb/ad/ad9dc98ff24f485008aa5cdedaf1a219876f6f6c42a4626c08bc4e80b120/pillow-12.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:8b7e5304e34942bf62e15184219a7b5ad4ff7f3bb5cca4d984f37df1a0e1aee2", size = 4657460, upload-time = "2026-02-11T04:21:13.786Z" }, + { url = "https://files.pythonhosted.org/packages/9e/1b/f1a4ea9a895b5732152789326202a82464d5254759fbacae4deea3069334/pillow-12.1.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:18e5bddd742a44b7e6b1e773ab5db102bd7a94c32555ba656e76d319d19c3850", size = 6232698, upload-time = "2026-02-11T04:21:15.949Z" }, + { url = "https://files.pythonhosted.org/packages/95/f4/86f51b8745070daf21fd2e5b1fe0eb35d4db9ca26e6d58366562fb56a743/pillow-12.1.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc44ef1f3de4f45b50ccf9136999d71abb99dca7706bc75d222ed350b9fd2289", size = 8041706, upload-time = "2026-02-11T04:21:17.723Z" }, + { url = "https://files.pythonhosted.org/packages/29/9b/d6ecd956bb1266dd1045e995cce9b8d77759e740953a1c9aad9502a0461e/pillow-12.1.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5a8eb7ed8d4198bccbd07058416eeec51686b498e784eda166395a23eb99138e", size = 6346621, upload-time = "2026-02-11T04:21:19.547Z" }, + { url = "https://files.pythonhosted.org/packages/71/24/538bff45bde96535d7d998c6fed1a751c75ac7c53c37c90dc2601b243893/pillow-12.1.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:47b94983da0c642de92ced1702c5b6c292a84bd3a8e1d1702ff923f183594717", size = 7038069, upload-time = "2026-02-11T04:21:21.378Z" }, + { url = "https://files.pythonhosted.org/packages/94/0e/58cb1a6bc48f746bc4cb3adb8cabff73e2742c92b3bf7a220b7cf69b9177/pillow-12.1.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:518a48c2aab7ce596d3bf79d0e275661b846e86e4d0e7dec34712c30fe07f02a", size = 6460040, upload-time = "2026-02-11T04:21:23.148Z" }, + { url = "https://files.pythonhosted.org/packages/6c/57/9045cb3ff11eeb6c1adce3b2d60d7d299d7b273a2e6c8381a524abfdc474/pillow-12.1.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a550ae29b95c6dc13cf69e2c9dc5747f814c54eeb2e32d683e5e93af56caa029", size = 7164523, upload-time = "2026-02-11T04:21:25.01Z" }, + { url = "https://files.pythonhosted.org/packages/73/f2/9be9cb99f2175f0d4dbadd6616ce1bf068ee54a28277ea1bf1fbf729c250/pillow-12.1.1-cp313-cp313-win32.whl", hash = "sha256:a003d7422449f6d1e3a34e3dd4110c22148336918ddbfc6a32581cd54b2e0b2b", size = 6332552, upload-time = "2026-02-11T04:21:27.238Z" }, + { url = 
"https://files.pythonhosted.org/packages/3f/eb/b0834ad8b583d7d9d42b80becff092082a1c3c156bb582590fcc973f1c7c/pillow-12.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:344cf1e3dab3be4b1fa08e449323d98a2a3f819ad20f4b22e77a0ede31f0faa1", size = 7040108, upload-time = "2026-02-11T04:21:29.462Z" }, + { url = "https://files.pythonhosted.org/packages/d5/7d/fc09634e2aabdd0feabaff4a32f4a7d97789223e7c2042fd805ea4b4d2c2/pillow-12.1.1-cp313-cp313-win_arm64.whl", hash = "sha256:5c0dd1636633e7e6a0afe7bf6a51a14992b7f8e60de5789018ebbdfae55b040a", size = 2453712, upload-time = "2026-02-11T04:21:31.072Z" }, + { url = "https://files.pythonhosted.org/packages/19/2a/b9d62794fc8a0dd14c1943df68347badbd5511103e0d04c035ffe5cf2255/pillow-12.1.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0330d233c1a0ead844fc097a7d16c0abff4c12e856c0b325f231820fee1f39da", size = 5264880, upload-time = "2026-02-11T04:21:32.865Z" }, + { url = "https://files.pythonhosted.org/packages/26/9d/e03d857d1347fa5ed9247e123fcd2a97b6220e15e9cb73ca0a8d91702c6e/pillow-12.1.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5dae5f21afb91322f2ff791895ddd8889e5e947ff59f71b46041c8ce6db790bc", size = 4660616, upload-time = "2026-02-11T04:21:34.97Z" }, + { url = "https://files.pythonhosted.org/packages/f7/ec/8a6d22afd02570d30954e043f09c32772bfe143ba9285e2fdb11284952cd/pillow-12.1.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2e0c664be47252947d870ac0d327fea7e63985a08794758aa8af5b6cb6ec0c9c", size = 6269008, upload-time = "2026-02-11T04:21:36.623Z" }, + { url = "https://files.pythonhosted.org/packages/3d/1d/6d875422c9f28a4a361f495a5f68d9de4a66941dc2c619103ca335fa6446/pillow-12.1.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:691ab2ac363b8217f7d31b3497108fb1f50faab2f75dfb03284ec2f217e87bf8", size = 8073226, upload-time = "2026-02-11T04:21:38.585Z" }, + { url = "https://files.pythonhosted.org/packages/a1/cd/134b0b6ee5eda6dc09e25e24b40fdafe11a520bc725c1d0bbaa5e00bf95b/pillow-12.1.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e9e8064fb1cc019296958595f6db671fba95209e3ceb0c4734c9baf97de04b20", size = 6380136, upload-time = "2026-02-11T04:21:40.562Z" }, + { url = "https://files.pythonhosted.org/packages/7a/a9/7628f013f18f001c1b98d8fffe3452f306a70dc6aba7d931019e0492f45e/pillow-12.1.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:472a8d7ded663e6162dafdf20015c486a7009483ca671cece7a9279b512fcb13", size = 7067129, upload-time = "2026-02-11T04:21:42.521Z" }, + { url = "https://files.pythonhosted.org/packages/1e/f8/66ab30a2193b277785601e82ee2d49f68ea575d9637e5e234faaa98efa4c/pillow-12.1.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:89b54027a766529136a06cfebeecb3a04900397a3590fd252160b888479517bf", size = 6491807, upload-time = "2026-02-11T04:21:44.22Z" }, + { url = "https://files.pythonhosted.org/packages/da/0b/a877a6627dc8318fdb84e357c5e1a758c0941ab1ddffdafd231983788579/pillow-12.1.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:86172b0831b82ce4f7877f280055892b31179e1576aa00d0df3bb1bbf8c3e524", size = 7190954, upload-time = "2026-02-11T04:21:46.114Z" }, + { url = "https://files.pythonhosted.org/packages/83/43/6f732ff85743cf746b1361b91665d9f5155e1483817f693f8d57ea93147f/pillow-12.1.1-cp313-cp313t-win32.whl", hash = "sha256:44ce27545b6efcf0fdbdceb31c9a5bdea9333e664cda58a7e674bb74608b3986", size = 6336441, upload-time = "2026-02-11T04:21:48.22Z" }, + { url = 
"https://files.pythonhosted.org/packages/3b/44/e865ef3986611bb75bfabdf94a590016ea327833f434558801122979cd0e/pillow-12.1.1-cp313-cp313t-win_amd64.whl", hash = "sha256:a285e3eb7a5a45a2ff504e31f4a8d1b12ef62e84e5411c6804a42197c1cf586c", size = 7045383, upload-time = "2026-02-11T04:21:50.015Z" }, + { url = "https://files.pythonhosted.org/packages/a8/c6/f4fb24268d0c6908b9f04143697ea18b0379490cb74ba9e8d41b898bd005/pillow-12.1.1-cp313-cp313t-win_arm64.whl", hash = "sha256:cc7d296b5ea4d29e6570dabeaed58d31c3fea35a633a69679fb03d7664f43fb3", size = 2456104, upload-time = "2026-02-11T04:21:51.633Z" }, + { url = "https://files.pythonhosted.org/packages/03/d0/bebb3ffbf31c5a8e97241476c4cf8b9828954693ce6744b4a2326af3e16b/pillow-12.1.1-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:417423db963cb4be8bac3fc1204fe61610f6abeed1580a7a2cbb2fbda20f12af", size = 4062652, upload-time = "2026-02-11T04:21:53.19Z" }, + { url = "https://files.pythonhosted.org/packages/2d/c0/0e16fb0addda4851445c28f8350d8c512f09de27bbb0d6d0bbf8b6709605/pillow-12.1.1-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:b957b71c6b2387610f556a7eb0828afbe40b4a98036fc0d2acfa5a44a0c2036f", size = 4138823, upload-time = "2026-02-11T04:22:03.088Z" }, + { url = "https://files.pythonhosted.org/packages/6b/fb/6170ec655d6f6bb6630a013dd7cf7bc218423d7b5fa9071bf63dc32175ae/pillow-12.1.1-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:097690ba1f2efdeb165a20469d59d8bb03c55fb6621eb2041a060ae8ea3e9642", size = 3601143, upload-time = "2026-02-11T04:22:04.909Z" }, + { url = "https://files.pythonhosted.org/packages/59/04/dc5c3f297510ba9a6837cbb318b87dd2b8f73eb41a43cc63767f65cb599c/pillow-12.1.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:2815a87ab27848db0321fb78c7f0b2c8649dee134b7f2b80c6a45c6831d75ccd", size = 5266254, upload-time = "2026-02-11T04:22:07.656Z" }, + { url = "https://files.pythonhosted.org/packages/05/30/5db1236b0d6313f03ebf97f5e17cda9ca060f524b2fcc875149a8360b21c/pillow-12.1.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:f7ed2c6543bad5a7d5530eb9e78c53132f93dfa44a28492db88b41cdab885202", size = 4657499, upload-time = "2026-02-11T04:22:09.613Z" }, + { url = "https://files.pythonhosted.org/packages/6f/18/008d2ca0eb612e81968e8be0bbae5051efba24d52debf930126d7eaacbba/pillow-12.1.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:652a2c9ccfb556235b2b501a3a7cf3742148cd22e04b5625c5fe057ea3e3191f", size = 6232137, upload-time = "2026-02-11T04:22:11.434Z" }, + { url = "https://files.pythonhosted.org/packages/70/f1/f14d5b8eeb4b2cd62b9f9f847eb6605f103df89ef619ac68f92f748614ea/pillow-12.1.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d6e4571eedf43af33d0fc233a382a76e849badbccdf1ac438841308652a08e1f", size = 8042721, upload-time = "2026-02-11T04:22:13.321Z" }, + { url = "https://files.pythonhosted.org/packages/5a/d6/17824509146e4babbdabf04d8171491fa9d776f7061ff6e727522df9bd03/pillow-12.1.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b574c51cf7d5d62e9be37ba446224b59a2da26dc4c1bb2ecbe936a4fb1a7cb7f", size = 6347798, upload-time = "2026-02-11T04:22:15.449Z" }, + { url = "https://files.pythonhosted.org/packages/d1/ee/c85a38a9ab92037a75615aba572c85ea51e605265036e00c5b67dfafbfe2/pillow-12.1.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a37691702ed687799de29a518d63d4682d9016932db66d4e90c345831b02fb4e", size = 7039315, upload-time = "2026-02-11T04:22:17.24Z" }, + { url = 
"https://files.pythonhosted.org/packages/ec/f3/bc8ccc6e08a148290d7523bde4d9a0d6c981db34631390dc6e6ec34cacf6/pillow-12.1.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f95c00d5d6700b2b890479664a06e754974848afaae5e21beb4d83c106923fd0", size = 6462360, upload-time = "2026-02-11T04:22:19.111Z" }, + { url = "https://files.pythonhosted.org/packages/f6/ab/69a42656adb1d0665ab051eec58a41f169ad295cf81ad45406963105408f/pillow-12.1.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:559b38da23606e68681337ad74622c4dbba02254fc9cb4488a305dd5975c7eeb", size = 7165438, upload-time = "2026-02-11T04:22:21.041Z" }, + { url = "https://files.pythonhosted.org/packages/02/46/81f7aa8941873f0f01d4b55cc543b0a3d03ec2ee30d617a0448bf6bd6dec/pillow-12.1.1-cp314-cp314-win32.whl", hash = "sha256:03edcc34d688572014ff223c125a3f77fb08091e4607e7745002fc214070b35f", size = 6431503, upload-time = "2026-02-11T04:22:22.833Z" }, + { url = "https://files.pythonhosted.org/packages/40/72/4c245f7d1044b67affc7f134a09ea619d4895333d35322b775b928180044/pillow-12.1.1-cp314-cp314-win_amd64.whl", hash = "sha256:50480dcd74fa63b8e78235957d302d98d98d82ccbfac4c7e12108ba9ecbdba15", size = 7176748, upload-time = "2026-02-11T04:22:24.64Z" }, + { url = "https://files.pythonhosted.org/packages/e4/ad/8a87bdbe038c5c698736e3348af5c2194ffb872ea52f11894c95f9305435/pillow-12.1.1-cp314-cp314-win_arm64.whl", hash = "sha256:5cb1785d97b0c3d1d1a16bc1d710c4a0049daefc4935f3a8f31f827f4d3d2e7f", size = 2544314, upload-time = "2026-02-11T04:22:26.685Z" }, + { url = "https://files.pythonhosted.org/packages/6c/9d/efd18493f9de13b87ede7c47e69184b9e859e4427225ea962e32e56a49bc/pillow-12.1.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:1f90cff8aa76835cba5769f0b3121a22bd4eb9e6884cfe338216e557a9a548b8", size = 5268612, upload-time = "2026-02-11T04:22:29.884Z" }, + { url = "https://files.pythonhosted.org/packages/f8/f1/4f42eb2b388eb2ffc660dcb7f7b556c1015c53ebd5f7f754965ef997585b/pillow-12.1.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1f1be78ce9466a7ee64bfda57bdba0f7cc499d9794d518b854816c41bf0aa4e9", size = 4660567, upload-time = "2026-02-11T04:22:31.799Z" }, + { url = "https://files.pythonhosted.org/packages/01/54/df6ef130fa43e4b82e32624a7b821a2be1c5653a5fdad8469687a7db4e00/pillow-12.1.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:42fc1f4677106188ad9a55562bbade416f8b55456f522430fadab3cef7cd4e60", size = 6269951, upload-time = "2026-02-11T04:22:33.921Z" }, + { url = "https://files.pythonhosted.org/packages/a9/48/618752d06cc44bb4aae8ce0cd4e6426871929ed7b46215638088270d9b34/pillow-12.1.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:98edb152429ab62a1818039744d8fbb3ccab98a7c29fc3d5fcef158f3f1f68b7", size = 8074769, upload-time = "2026-02-11T04:22:35.877Z" }, + { url = "https://files.pythonhosted.org/packages/c3/bd/f1d71eb39a72fa088d938655afba3e00b38018d052752f435838961127d8/pillow-12.1.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d470ab1178551dd17fdba0fef463359c41aaa613cdcd7ff8373f54be629f9f8f", size = 6381358, upload-time = "2026-02-11T04:22:37.698Z" }, + { url = "https://files.pythonhosted.org/packages/64/ef/c784e20b96674ed36a5af839305f55616f8b4f8aa8eeccf8531a6e312243/pillow-12.1.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6408a7b064595afcab0a49393a413732a35788f2a5092fdc6266952ed67de586", size = 7068558, upload-time = "2026-02-11T04:22:39.597Z" }, + { url = 
"https://files.pythonhosted.org/packages/73/cb/8059688b74422ae61278202c4e1ad992e8a2e7375227be0a21c6b87ca8d5/pillow-12.1.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5d8c41325b382c07799a3682c1c258469ea2ff97103c53717b7893862d0c98ce", size = 6493028, upload-time = "2026-02-11T04:22:42.73Z" }, + { url = "https://files.pythonhosted.org/packages/c6/da/e3c008ed7d2dd1f905b15949325934510b9d1931e5df999bb15972756818/pillow-12.1.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:c7697918b5be27424e9ce568193efd13d925c4481dd364e43f5dff72d33e10f8", size = 7191940, upload-time = "2026-02-11T04:22:44.543Z" }, + { url = "https://files.pythonhosted.org/packages/01/4a/9202e8d11714c1fc5951f2e1ef362f2d7fbc595e1f6717971d5dd750e969/pillow-12.1.1-cp314-cp314t-win32.whl", hash = "sha256:d2912fd8114fc5545aa3a4b5576512f64c55a03f3ebcca4c10194d593d43ea36", size = 6438736, upload-time = "2026-02-11T04:22:46.347Z" }, + { url = "https://files.pythonhosted.org/packages/f3/ca/cbce2327eb9885476b3957b2e82eb12c866a8b16ad77392864ad601022ce/pillow-12.1.1-cp314-cp314t-win_amd64.whl", hash = "sha256:4ceb838d4bd9dab43e06c363cab2eebf63846d6a4aeaea283bbdfd8f1a8ed58b", size = 7182894, upload-time = "2026-02-11T04:22:48.114Z" }, + { url = "https://files.pythonhosted.org/packages/ec/d2/de599c95ba0a973b94410477f8bf0b6f0b5e67360eb89bcb1ad365258beb/pillow-12.1.1-cp314-cp314t-win_arm64.whl", hash = "sha256:7b03048319bfc6170e93bd60728a1af51d3dd7704935feb228c4d4faab35d334", size = 2546446, upload-time = "2026-02-11T04:22:50.342Z" }, + { url = "https://files.pythonhosted.org/packages/56/11/5d43209aa4cb58e0cc80127956ff1796a68b928e6324bbf06ef4db34367b/pillow-12.1.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:600fd103672b925fe62ed08e0d874ea34d692474df6f4bf7ebe148b30f89f39f", size = 5228606, upload-time = "2026-02-11T04:22:52.106Z" }, + { url = "https://files.pythonhosted.org/packages/5f/d5/3b005b4e4fda6698b371fa6c21b097d4707585d7db99e98d9b0b87ac612a/pillow-12.1.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:665e1b916b043cef294bc54d47bf02d87e13f769bc4bc5fa225a24b3a6c5aca9", size = 4622321, upload-time = "2026-02-11T04:22:53.827Z" }, + { url = "https://files.pythonhosted.org/packages/df/36/ed3ea2d594356fd8037e5a01f6156c74bc8d92dbb0fa60746cc96cabb6e8/pillow-12.1.1-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:495c302af3aad1ca67420ddd5c7bd480c8867ad173528767d906428057a11f0e", size = 5247579, upload-time = "2026-02-11T04:22:56.094Z" }, + { url = "https://files.pythonhosted.org/packages/54/9a/9cc3e029683cf6d20ae5085da0dafc63148e3252c2f13328e553aaa13cfb/pillow-12.1.1-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8fd420ef0c52c88b5a035a0886f367748c72147b2b8f384c9d12656678dfdfa9", size = 6989094, upload-time = "2026-02-11T04:22:58.288Z" }, + { url = "https://files.pythonhosted.org/packages/00/98/fc53ab36da80b88df0967896b6c4b4cd948a0dc5aa40a754266aa3ae48b3/pillow-12.1.1-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f975aa7ef9684ce7e2c18a3aa8f8e2106ce1e46b94ab713d156b2898811651d3", size = 5313850, upload-time = "2026-02-11T04:23:00.554Z" }, + { url = "https://files.pythonhosted.org/packages/30/02/00fa585abfd9fe9d73e5f6e554dc36cc2b842898cbfc46d70353dae227f8/pillow-12.1.1-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8089c852a56c2966cf18835db62d9b34fef7ba74c726ad943928d494fa7f4735", size = 5963343, upload-time = "2026-02-11T04:23:02.934Z" }, + { url = 
"https://files.pythonhosted.org/packages/f2/26/c56ce33ca856e358d27fda9676c055395abddb82c35ac0f593877ed4562e/pillow-12.1.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:cb9bb857b2d057c6dfc72ac5f3b44836924ba15721882ef103cecb40d002d80e", size = 7029880, upload-time = "2026-02-11T04:23:04.783Z" }, ] [[package]] @@ -3251,6 +3305,7 @@ dependencies = [ { name = "json2html" }, { name = "kajson" }, { name = "markdown" }, + { name = "mthds" }, { name = "networkx", version = "3.4.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, { name = "networkx", version = "3.6.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, { name = "openai" }, @@ -3267,6 +3322,7 @@ dependencies = [ { name = "python-dotenv" }, { name = "pyyaml" }, { name = "rich" }, + { name = "semantic-version" }, { name = "shortuuid" }, { name = "tomli" }, { name = "tomlkit" }, @@ -3286,6 +3342,7 @@ dev = [ { name = "boto3-stubs" }, { name = "moto", extra = ["s3"] }, { name = "mypy" }, + { name = "pipelex-tools" }, { name = "pylint" }, { name = "pyright" }, { name = "pytest" }, @@ -3366,6 +3423,7 @@ requires-dist = [ { name = "mkdocs-material", marker = "extra == 'docs'", specifier = ">=9.6.14" }, { name = "mkdocs-meta-manager", marker = "extra == 'docs'", specifier = ">=1.1.0" }, { name = "moto", extras = ["s3"], marker = "extra == 'dev'", specifier = ">=5.0.0" }, + { name = "mthds", specifier = ">=0.0.1" }, { name = "mypy", marker = "extra == 'dev'", specifier = "==1.19.1" }, { name = "networkx", specifier = ">=3.4.2" }, { name = "openai", specifier = ">=1.108.1" }, @@ -3374,6 +3432,7 @@ requires-dist = [ { name = "opentelemetry-sdk" }, { name = "opentelemetry-semantic-conventions" }, { name = "pillow", specifier = ">=11.2.1" }, + { name = "pipelex-tools", marker = "extra == 'dev'", specifier = ">=0.1.1" }, { name = "polyfactory", specifier = ">=2.21.0" }, { name = "portkey-ai", specifier = ">=2.1.0" }, { name = "posthog", specifier = ">=6.7.0" }, @@ -3391,6 +3450,7 @@ requires-dist = [ { name = "pyyaml", specifier = ">=6.0.2" }, { name = "rich", specifier = ">=13.8.1" }, { name = "ruff", marker = "extra == 'dev'", specifier = "==0.14.13" }, + { name = "semantic-version", specifier = ">=2.10.0" }, { name = "shortuuid", specifier = ">=1.0.13" }, { name = "tomli", specifier = ">=2.3.0" }, { name = "tomlkit", specifier = ">=0.13.2" }, @@ -3404,13 +3464,28 @@ requires-dist = [ ] provides-extras = ["anthropic", "bedrock", "docling", "fal", "gcp-storage", "google", "google-genai", "huggingface", "mistralai", "s3", "docs", "dev"] +[[package]] +name = "pipelex-tools" +version = "0.1.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3f/cd/e01fda44b3228a65f3527ec7b0bd293b7f3e1bd51d9bd5c2f3e02a120610/pipelex_tools-0.1.4.tar.gz", hash = "sha256:13d0cfd609d239fe900dc2088959efe02246ddebf5d129d41ac7ab14237e4858", size = 141659, upload-time = "2026-02-17T09:04:45.918Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3f/f6/8a44b810a12fdd69fbc4e7208d210807bf925929b99a9928f85b779e7ace/pipelex_tools-0.1.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:28daec84cf7c0236e06bc997b1d1be6d49a7364fa48db8d50f6b12e33112972e", size = 5070670, upload-time = "2026-02-17T09:04:30.155Z" }, + { url = "https://files.pythonhosted.org/packages/19/45/2115f126ca7ef4fa0067c6d222e46e6889a211ab17b92cf82315b9517681/pipelex_tools-0.1.4-py3-none-macosx_11_0_arm64.whl", hash = 
"sha256:dc795d831fba9c7e4226a3e99264dd12acae22e302a5dd4990a18cb7bc371f3e", size = 4815945, upload-time = "2026-02-17T09:04:32.728Z" }, + { url = "https://files.pythonhosted.org/packages/29/0c/1fd0fe5e2653b9a85d332e0500d6e6be1aef63de1aae42a2cb7da1f58a53/pipelex_tools-0.1.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46095fe914bf03836dafb1b2a3f8f6da11dfeef6f95e27da467f0c0663cc8aa5", size = 4955694, upload-time = "2026-02-17T09:04:35.454Z" }, + { url = "https://files.pythonhosted.org/packages/23/a4/1fbe3098eda66c510e17e041e7a35e62efaf7c05951d8c048d790adf319d/pipelex_tools-0.1.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c2dbafaa0fdd3f66092cd00db3b7ff27251073f777c8425c510e2fee02bd71ea", size = 5201405, upload-time = "2026-02-17T09:04:37.868Z" }, + { url = "https://files.pythonhosted.org/packages/79/a2/d6ae8bf2ae18ea1cd733f5542b9bbebba5b3a11e291da417387f26100466/pipelex_tools-0.1.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe42601510376364e26b36448175f2a9f73e8f80d07a8d069e5de36da9ee5c61", size = 5214166, upload-time = "2026-02-17T09:04:40.313Z" }, + { url = "https://files.pythonhosted.org/packages/61/59/10a2bafdddab747accfdd7f13848e9c293289205d25faaae43d3e41976e7/pipelex_tools-0.1.4-py3-none-win32.whl", hash = "sha256:7cdd7389afe54e7365cf199bfd70d6d22627bab09583a7dc8553681120b36be2", size = 4589310, upload-time = "2026-02-17T09:04:42.186Z" }, + { url = "https://files.pythonhosted.org/packages/fd/94/e32e686613b5f985be36e78dbed9771cc89de9db573b702277ec641b0cd2/pipelex_tools-0.1.4-py3-none-win_amd64.whl", hash = "sha256:0d6e93a83ba60db422a04a94bc18e8d0cd67f7a911f6341a3702ca6760b685bb", size = 5388721, upload-time = "2026-02-17T09:04:44.448Z" }, +] + [[package]] name = "platformdirs" -version = "4.5.1" +version = "4.9.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/cf/86/0248f086a84f01b37aaec0fa567b397df1a119f73c16f6c7a9aac73ea309/platformdirs-4.5.1.tar.gz", hash = "sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda", size = 21715, upload-time = "2025-12-05T13:52:58.638Z" } +sdist = { url = "https://files.pythonhosted.org/packages/1b/04/fea538adf7dbbd6d186f551d595961e564a3b6715bdf276b477460858672/platformdirs-4.9.2.tar.gz", hash = "sha256:9a33809944b9db043ad67ca0db94b14bf452cc6aeaac46a88ea55b26e2e9d291", size = 28394, upload-time = "2026-02-16T03:56:10.574Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/cb/28/3bfe2fa5a7b9c46fe7e13c97bda14c895fb10fa2ebf1d0abb90e0cea7ee1/platformdirs-4.5.1-py3-none-any.whl", hash = "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31", size = 18731, upload-time = "2025-12-05T13:52:56.823Z" }, + { url = "https://files.pythonhosted.org/packages/48/31/05e764397056194206169869b50cf2fee4dbbbc71b344705b9c0d878d4d8/platformdirs-4.9.2-py3-none-any.whl", hash = "sha256:9170634f126f8efdae22fb58ae8a0eaa86f38365bc57897a6c4f781d1f5875bd", size = 21168, upload-time = "2026-02-16T03:56:08.891Z" }, ] [[package]] @@ -3437,7 +3512,7 @@ wheels = [ [[package]] name = "portkey-ai" -version = "2.1.0" +version = "2.2.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -3451,14 +3526,14 @@ dependencies = [ { name = "types-requests" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d4/8a/f5bbaab806ad61d9959cb7c88c639200feacac1b2ba7b455b97a2f216e7c/portkey_ai-2.1.0.tar.gz", hash = 
"sha256:c2558041c568eef8528737978089301cb9be056f166a683251831cbfa6a623cb", size = 567417, upload-time = "2025-11-25T20:32:43.102Z" } +sdist = { url = "https://files.pythonhosted.org/packages/40/b6/b5ff914ed6f171e6e0bc62fdec2b20b9bd812396acf133e10ecae15536ee/portkey_ai-2.2.0.tar.gz", hash = "sha256:ccff66e34c4014d65b18883c92ac242ab61663d02a46bde1d179bbe3157ed36c", size = 605122, upload-time = "2026-02-16T10:50:39.953Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b7/11/c585b90ac842027e5f4f7f7cee72d3197f58ff24b6d7c5f1243aa8fa96be/portkey_ai-2.1.0-py3-none-any.whl", hash = "sha256:2166033f8e198745947fee5321d0bbcfb005afc35468bd5a948fa83dc16b6767", size = 1181622, upload-time = "2025-11-25T20:32:41.185Z" }, + { url = "https://files.pythonhosted.org/packages/1c/be/0a817a2ab407f929deff8b94abfe170171ca8ff0e9f81e7a8182328e5bb0/portkey_ai-2.2.0-py3-none-any.whl", hash = "sha256:9d33c2f6f29777904a269da1deb3a34e3517028e779dac95b3297481c005291a", size = 1253175, upload-time = "2026-02-16T10:50:38.634Z" }, ] [[package]] name = "posthog" -version = "7.8.3" +version = "7.9.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "backoff" }, @@ -3468,9 +3543,9 @@ dependencies = [ { name = "six" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d1/ad/2f116cd9b83dc83ece4328a4efe0bcb80e5c2993837f89a788467d261da8/posthog-7.8.3.tar.gz", hash = "sha256:2b85e818bf818ac2768a890b772b7c12d4f909797226acd9327d66a319dbcf83", size = 167083, upload-time = "2026-02-06T13:16:22.938Z" } +sdist = { url = "https://files.pythonhosted.org/packages/5b/8c/0b66a64bca61d927b7e3ddab564419213303ab9f2bce42036b894c3919d7/posthog-7.9.1.tar.gz", hash = "sha256:448fb0ec4d5cabb8967a36512b8a9aa50d1e46657913245031b41ca434ca9882", size = 172247, upload-time = "2026-02-17T16:59:40.345Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e7/e5/5a4b060cbb9aa9defb8bfd55d15899b3146fece14147f4d66be80e81955a/posthog-7.8.3-py3-none-any.whl", hash = "sha256:1840796e4f7e14dd91ec5fdeb939712c3383fe9e758cfcdeb0317d8f30f7b901", size = 192528, upload-time = "2026-02-06T13:16:21.385Z" }, + { url = "https://files.pythonhosted.org/packages/78/47/9033ff9020f9b473e4f89b349f821c2ad053241cf2274693f838c9edbc7b/posthog-7.9.1-py3-none-any.whl", hash = "sha256:fe1c4869545f0bfaf458618f03875ea226f17ea23a94553e2e9d611dc77af722", size = 197851, upload-time = "2026-02-17T16:59:37.679Z" }, ] [[package]] @@ -3859,16 +3934,16 @@ wheels = [ [[package]] name = "pydantic-settings" -version = "2.12.0" +version = "2.13.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pydantic" }, { name = "python-dotenv" }, { name = "typing-inspection" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/43/4b/ac7e0aae12027748076d72a8764ff1c9d82ca75a7a52622e67ed3f765c54/pydantic_settings-2.12.0.tar.gz", hash = "sha256:005538ef951e3c2a68e1c08b292b5f2e71490def8589d4221b95dab00dafcfd0", size = 194184, upload-time = "2025-11-10T14:25:47.013Z" } +sdist = { url = "https://files.pythonhosted.org/packages/96/a1/ae859ffac5a3338a66b74c5e29e244fd3a3cc483c89feaf9f56c39898d75/pydantic_settings-2.13.0.tar.gz", hash = "sha256:95d875514610e8595672800a5c40b073e99e4aae467fa7c8f9c263061ea2e1fe", size = 222450, upload-time = "2026-02-15T12:11:23.476Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c1/60/5d4751ba3f4a40a6891f24eec885f51afd78d208498268c734e256fb13c4/pydantic_settings-2.12.0-py3-none-any.whl", hash = 
"sha256:fddb9fd99a5b18da837b29710391e945b1e30c135477f484084ee513adb93809", size = 51880, upload-time = "2025-11-10T14:25:45.546Z" }, + { url = "https://files.pythonhosted.org/packages/b0/1a/dd1b9d7e627486cf8e7523d09b70010e05a4bc41414f4ae6ce184cf0afb6/pydantic_settings-2.13.0-py3-none-any.whl", hash = "sha256:d67b576fff39cd086b595441bf9c75d4193ca9c0ed643b90360694d0f1240246", size = 58429, upload-time = "2026-02-15T12:11:22.133Z" }, ] [[package]] @@ -3907,15 +3982,15 @@ wheels = [ [[package]] name = "pymdown-extensions" -version = "10.20.1" +version = "10.21" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "markdown" }, { name = "pyyaml" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/1e/6c/9e370934bfa30e889d12e61d0dae009991294f40055c238980066a7fbd83/pymdown_extensions-10.20.1.tar.gz", hash = "sha256:e7e39c865727338d434b55f1dd8da51febcffcaebd6e1a0b9c836243f660740a", size = 852860, upload-time = "2026-01-24T05:56:56.758Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/63/06673d1eb6d8f83c0ea1f677d770e12565fb516928b4109c9e2055656a9e/pymdown_extensions-10.21.tar.gz", hash = "sha256:39f4a020f40773f6b2ff31d2cd2546c2c04d0a6498c31d9c688d2be07e1767d5", size = 853363, upload-time = "2026-02-15T20:44:06.748Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/40/6d/b6ee155462a0156b94312bdd82d2b92ea56e909740045a87ccb98bf52405/pymdown_extensions-10.20.1-py3-none-any.whl", hash = "sha256:24af7feacbca56504b313b7b418c4f5e1317bb5fea60f03d57be7fcc40912aa0", size = 268768, upload-time = "2026-01-24T05:56:54.537Z" }, + { url = "https://files.pythonhosted.org/packages/6f/2c/5b079febdc65e1c3fb2729bf958d18b45be7113828528e8a0b5850dd819a/pymdown_extensions-10.21-py3-none-any.whl", hash = "sha256:91b879f9f864d49794c2d9534372b10150e6141096c3908a455e45ca72ad9d3f", size = 268877, upload-time = "2026-02-15T20:44:05.464Z" }, ] [[package]] @@ -4923,6 +4998,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/94/7f/f783e2254db082df4f6bc00fe3b32b9dd27c3b7302a44c8c37728bb67fb7/selectolax-0.4.6-cp314-cp314t-win_arm64.whl", hash = "sha256:66558cfb1c7402fed0f47b9a2692eed53e3e2f345526314b493b5093cb951e21", size = 1906079, upload-time = "2025-12-06T12:35:32.951Z" }, ] +[[package]] +name = "semantic-version" +version = "2.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7d/31/f2289ce78b9b473d582568c234e104d2a342fd658cc288a7553d83bb8595/semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c", size = 52289, upload-time = "2022-05-26T13:35:23.454Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6a/23/8146aad7d88f4fcb3a6218f41a60f6c2d4e3a72de72da1825dc7c8f7877c/semantic_version-2.10.0-py2.py3-none-any.whl", hash = "sha256:de78a3b8e0feda74cabc54aab2da702113e33ac9d9eb9d2389bcf1f58b7d9177", size = 15552, upload-time = "2022-05-26T13:35:21.206Z" }, +] + [[package]] name = "semchunk" version = "2.2.2" @@ -4938,11 +5022,11 @@ wheels = [ [[package]] name = "setuptools" -version = "81.0.0" +version = "82.0.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/0d/1c/73e719955c59b8e424d015ab450f51c0af856ae46ea2da83eba51cc88de1/setuptools-81.0.0.tar.gz", hash = "sha256:487b53915f52501f0a79ccfd0c02c165ffe06631443a886740b91af4b7a5845a", size = 1198299, upload-time = "2026-02-06T21:10:39.601Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/82/f3/748f4d6f65d1756b9ae577f329c951cda23fb900e4de9f70900ced962085/setuptools-82.0.0.tar.gz", hash = "sha256:22e0a2d69474c6ae4feb01951cb69d515ed23728cf96d05513d36e42b62b37cb", size = 1144893, upload-time = "2026-02-08T15:08:40.206Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e1/e3/c164c88b2e5ce7b24d667b9bd83589cf4f3520d97cad01534cd3c4f55fdb/setuptools-81.0.0-py3-none-any.whl", hash = "sha256:fdd925d5c5d9f62e4b74b30d6dd7828ce236fd6ed998a08d81de62ce5a6310d6", size = 1062021, upload-time = "2026-02-06T21:10:37.175Z" }, + { url = "https://files.pythonhosted.org/packages/e1/c6/76dc613121b793286a3f91621d7b75a2b493e0390ddca50f11993eadf192/setuptools-82.0.0-py3-none-any.whl", hash = "sha256:70b18734b607bd1da571d097d236cfcfacaf01de45717d59e6e04b96877532e0", size = 1003468, upload-time = "2026-02-08T15:08:38.723Z" }, ] [[package]] @@ -5452,17 +5536,17 @@ wheels = [ [[package]] name = "typer" -version = "0.21.1" +version = "0.21.2" source = { registry = "https://pypi.org/simple" } dependencies = [ + { name = "annotated-doc" }, { name = "click" }, { name = "rich" }, { name = "shellingham" }, - { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/36/bf/8825b5929afd84d0dabd606c67cd57b8388cb3ec385f7ef19c5cc2202069/typer-0.21.1.tar.gz", hash = "sha256:ea835607cd752343b6b2b7ce676893e5a0324082268b48f27aa058bdb7d2145d", size = 110371, upload-time = "2026-01-06T11:21:10.989Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f2/1e/a27cc02a0cd715118c71fa2aef2c687fdefc3c28d90fd0dd789c5118154c/typer-0.21.2.tar.gz", hash = "sha256:1abd95a3b675e17ff61b0838ac637fe9478d446d62ad17fa4bb81ea57cc54028", size = 120426, upload-time = "2026-02-10T19:33:46.182Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a0/1d/d9257dd49ff2ca23ea5f132edf1281a0c4f9de8a762b9ae399b670a59235/typer-0.21.1-py3-none-any.whl", hash = "sha256:7985e89081c636b88d172c2ee0cfe33c253160994d47bdfdc302defd7d1f1d01", size = 47381, upload-time = "2026-01-06T11:21:09.824Z" }, + { url = "https://files.pythonhosted.org/packages/b8/cc/d59f893fbdfb5f58770c05febfc4086a46875f1084453621c35605cec946/typer-0.21.2-py3-none-any.whl", hash = "sha256:c3d8de54d00347ef90b82131ca946274f017cffb46683ae3883c360fa958f55c", size = 56728, upload-time = "2026-02-10T19:33:48.01Z" }, ] [[package]] @@ -5490,15 +5574,15 @@ bedrock-runtime = [ [[package]] name = "types-aiobotocore" -version = "3.1.2" +version = "3.1.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "botocore-stubs" }, { name = "typing-extensions", marker = "python_full_version < '3.12'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e8/99/5e2980833461c997426b1b660b7ab491dd0c54d325544c96faef4768c504/types_aiobotocore-3.1.2.tar.gz", hash = "sha256:82c19bb6fa4d70cca6beffcc49ae5f28990c9178ed5fd3ef3c86d69f062ee7c2", size = 86455, upload-time = "2026-02-06T02:34:36.735Z" } +sdist = { url = "https://files.pythonhosted.org/packages/51/84/7bac4416719b7b45065495ffce1a6d3d6dec91be27fefc7d4b706b08d7bf/types_aiobotocore-3.1.3.tar.gz", hash = "sha256:e19b3b91d9da46cc5fb4c0ae9e51a6f02e168608db84cbbcbc26a3c419cf386a", size = 86437, upload-time = "2026-02-15T02:44:14.955Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/1b/cd/0ddd0874f2d2df81557557fbc7d565dd76cd1593805d6d875ee569893a6a/types_aiobotocore-3.1.2-py3-none-any.whl", hash = "sha256:8372cd606c648b9f91ae065d0610e43e9f8b432e21eafe4182d139ea76ba3df8", size = 54206, upload-time = 
"2026-02-06T02:34:33.721Z" }, + { url = "https://files.pythonhosted.org/packages/3b/63/f7dad2aff2c66efb7bd7e38b116ff0bc40c7223f54eef1cb50b8b265d287/types_aiobotocore-3.1.3-py3-none-any.whl", hash = "sha256:7cc70db11c568ca439f81473d711f16955a10d7289c519d7943f01a8e12ea727", size = 54208, upload-time = "2026-02-15T02:44:09.873Z" }, ] [[package]] @@ -5536,33 +5620,33 @@ wheels = [ [[package]] name = "types-awscrt" -version = "0.31.1" +version = "0.31.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/97/be/589b7bba42b5681a72bac4d714287afef4e1bb84d07c859610ff631d449e/types_awscrt-0.31.1.tar.gz", hash = "sha256:08b13494f93f45c1a92eb264755fce50ed0d1dc75059abb5e31670feb9a09724", size = 17839, upload-time = "2026-01-16T02:01:23.394Z" } +sdist = { url = "https://files.pythonhosted.org/packages/18/24/5497a611f32cbaf4b9e1af35f56463e8f02e198ec513b68cb59a63f5a446/types_awscrt-0.31.2.tar.gz", hash = "sha256:dc79705acd24094656b8105b8d799d7e273c8eac37c69137df580cd84beb54f6", size = 18190, upload-time = "2026-02-16T02:33:53.135Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5e/fd/ddca80617f230bd833f99b4fb959abebffd8651f520493cae2e96276b1bd/types_awscrt-0.31.1-py3-none-any.whl", hash = "sha256:7e4364ac635f72bd57f52b093883640b1448a6eded0ecbac6e900bf4b1e4777b", size = 42516, upload-time = "2026-01-16T02:01:21.637Z" }, + { url = "https://files.pythonhosted.org/packages/ab/3d/21a2212b5fcef9e8e9f368403885dc567b7d31e50b2ce393efad3cd83572/types_awscrt-0.31.2-py3-none-any.whl", hash = "sha256:3d6a29c1cca894b191be408f4d985a8e3a14d919785652dd3fa4ee558143e4bf", size = 43340, upload-time = "2026-02-16T02:33:52.109Z" }, ] [[package]] name = "types-markdown" -version = "3.10.0.20251106" +version = "3.10.2.20260211" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/de/e4/060f0dadd9b551cae77d6407f2bc84b168f918d90650454aff219c1b3ed2/types_markdown-3.10.0.20251106.tar.gz", hash = "sha256:12836f7fcbd7221db8baeb0d3a2f820b95050d0824bfa9665c67b4d144a1afa1", size = 19486, upload-time = "2025-11-06T03:06:44.317Z" } +sdist = { url = "https://files.pythonhosted.org/packages/6d/2e/35b30a09f6ee8a69142408d3ceb248c4454aa638c0a414d8704a3ef79563/types_markdown-3.10.2.20260211.tar.gz", hash = "sha256:66164310f88c11a58c6c706094c6f8c537c418e3525d33b76276a5fbd66b01ce", size = 19768, upload-time = "2026-02-11T04:19:29.497Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/92/58/f666ca9391f2a8bd33bb0b0797cde6ac3e764866708d5f8aec6fab215320/types_markdown-3.10.0.20251106-py3-none-any.whl", hash = "sha256:2c39512a573899b59efae07e247ba088a75b70e3415e81277692718f430afd7e", size = 25862, upload-time = "2025-11-06T03:06:43.082Z" }, + { url = "https://files.pythonhosted.org/packages/54/c9/659fa2df04b232b0bfcd05d2418e683080e91ec68f636f3c0a5a267350e7/types_markdown-3.10.2.20260211-py3-none-any.whl", hash = "sha256:2d94d08587e3738203b3c4479c449845112b171abe8b5cadc9b0c12fcf3e99da", size = 25854, upload-time = "2026-02-11T04:19:28.647Z" }, ] [[package]] name = "types-networkx" -version = "3.6.1.20251220" +version = "3.6.1.20260210" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, { name = "numpy", version = "2.4.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/07/e3/dcc20d645dc0631b0df263959b8dde49dc47ad3c0537d8958bfefe692380/types_networkx-3.6.1.20251220.tar.gz", hash = "sha256:caf95e0d7777b969e50ceeb2c430d9d4dfe6b7bdee43c42dc9879a2d4408a790", size = 73500, upload-time = "2025-12-20T03:07:47.933Z" } +sdist = { url = "https://files.pythonhosted.org/packages/4f/d9/7ddf6afb27246998ae41f7ad19da410d83e24623b4db065b5a46888d327e/types_networkx-3.6.1.20260210.tar.gz", hash = "sha256:9864affb01ed53d6bf41c1042fbced155ac409ae02ca505e0a3fffe48901b6e1", size = 73702, upload-time = "2026-02-10T04:22:17.641Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/65/e7/fe40cfe7ba384d1f46fee835eb7727a4ee2fd80021a69add9553197b69a1/types_networkx-3.6.1.20251220-py3-none-any.whl", hash = "sha256:417ccbe7841f335a4c2b8e7515c3bc97a00fb5f686f399a763ef64392b209eac", size = 162715, upload-time = "2025-12-20T03:07:46.882Z" }, + { url = "https://files.pythonhosted.org/packages/55/b0/1c45681a8b8d3ccf25cebaa296b06d5240518bd7a7d861cf14a15bf9dd20/types_networkx-3.6.1.20260210-py3-none-any.whl", hash = "sha256:075ccb9f2e2b370c3a9eae9636f2f38890e7c494e6323cb72a0207f104f8225e", size = 162684, upload-time = "2026-02-10T04:22:16.055Z" }, ] [[package]] @@ -5826,11 +5910,11 @@ wheels = [ [[package]] name = "xmltodict" -version = "1.0.2" +version = "1.0.3" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/6a/aa/917ceeed4dbb80d2f04dbd0c784b7ee7bba8ae5a54837ef0e5e062cd3cfb/xmltodict-1.0.2.tar.gz", hash = "sha256:54306780b7c2175a3967cad1db92f218207e5bc1aba697d887807c0fb68b7649", size = 25725, upload-time = "2025-09-17T21:59:26.459Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a9/66/d1242ce8748c698e0036837dfbb530480e31f11d3ecf7101cd4e30d29913/xmltodict-1.0.3.tar.gz", hash = "sha256:3bf1f49c7836df34cf6d9cc7e690c4351f7dfff2ab0b8a1988bba4a9b9474909", size = 25170, upload-time = "2026-02-15T04:05:05.728Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c0/20/69a0e6058bc5ea74892d089d64dfc3a62ba78917ec5e2cfa70f7c92ba3a5/xmltodict-1.0.2-py3-none-any.whl", hash = "sha256:62d0fddb0dcbc9f642745d8bbf4d81fd17d6dfaec5a15b5c1876300aad92af0d", size = 13893, upload-time = "2025-09-17T21:59:24.859Z" }, + { url = "https://files.pythonhosted.org/packages/82/df/5c50d5bfc886cf61e665f34925e70c7d5ab1d39db0bcc4a03f6fdae3cd76/xmltodict-1.0.3-py3-none-any.whl", hash = "sha256:35d65d5c08f2a1121df338a0c4e49ca638480fa7c1b899ded45e0759bf32e40e", size = 13295, upload-time = "2026-02-15T04:05:04.407Z" }, ] [[package]]