From 5b383d040654d01a463f8826c5ee2c738e608652 Mon Sep 17 00:00:00 2001 From: Louis Choquel <8851983+lchoquel@users.noreply.github.com> Date: Sun, 14 Dec 2025 21:58:24 +0100 Subject: [PATCH 01/10] Use Gateway (local dep) --- .env.example | 2 +- my_project/hello_world.py | 9 +- pyproject.toml | 4 + uv.lock | 249 ++++++++++++++++++++++++++++++++++++-- 4 files changed, 250 insertions(+), 14 deletions(-) diff --git a/.env.example b/.env.example index 22b29d7..cbc7ad2 100644 --- a/.env.example +++ b/.env.example @@ -1 +1 @@ -PIPELEX_INFERENCE_API_KEY= \ No newline at end of file +PIPELEX_GATEWAY_API_KEY= \ No newline at end of file diff --git a/my_project/hello_world.py b/my_project/hello_world.py index 5614816..7249a14 100644 --- a/my_project/hello_world.py +++ b/my_project/hello_world.py @@ -17,8 +17,11 @@ async def hello_world(): # Print the output pretty_print(pipe_output, title="Your first Pipelex output") + # get the generated text + generated_text = pipe_output.main_stuff_as_str + pretty_print(generated_text, title="Generated text") + # start Pipelex -Pipelex.make() -# run sample using asyncio -asyncio.run(hello_world()) +with Pipelex.make(): + asyncio.run(hello_world()) diff --git a/pyproject.toml b/pyproject.toml index 1cfd5d7..821ce0d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,6 +19,10 @@ dependencies = [ "pipelex[mistralai,anthropic,google,google-genai,bedrock,fal]==0.17.3", ] +[tool.uv.sources] +pipelex = { path = "../pipelex", editable = true } + + [tool.setuptools] packages = ["my_project"] include-package-data = true diff --git a/uv.lock b/uv.lock index f5923b6..66406c0 100644 --- a/uv.lock +++ b/uv.lock @@ -343,6 +343,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/68/b6/f624f5143bc5f7a66b79fc40e67b30b9584471a6143062af420bc83ae887/botocore_stubs-1.41.6-py3-none-any.whl", hash = "sha256:859e4147b5b14dc5eb64fc84fa02424839354368a0fea41da52c7a1d06427e37", size = 66748, upload-time = "2025-12-01T04:14:12.833Z" }, ] +[[package]] +name = "cached-property" +version = "2.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/76/4b/3d870836119dbe9a5e3c9a61af8cc1a8b69d75aea564572e385882d5aefb/cached_property-2.0.1.tar.gz", hash = "sha256:484d617105e3ee0e4f1f58725e72a8ef9e93deee462222dbd51cd91230897641", size = 10574, upload-time = "2024-10-25T15:43:55.667Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/0e/7d8225aab3bc1a0f5811f8e1b557aa034ac04bdf641925b30d3caf586b28/cached_property-2.0.1-py3-none-any.whl", hash = "sha256:f617d70ab1100b7bcf6e42228f9ddcb78c676ffa167278d9f730d1c2fba69ccb", size = 7428, upload-time = "2024-10-25T15:43:54.711Z" }, +] + [[package]] name = "cachetools" version = "6.2.2" @@ -747,6 +756,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ec/66/03f663e7bca7abe9ccfebe6cb3fe7da9a118fd723a5abb278d6117e7990e/google_genai-1.52.0-py3-none-any.whl", hash = "sha256:c8352b9f065ae14b9322b949c7debab8562982f03bf71d44130cd2b798c20743", size = 261219, upload-time = "2025-11-21T02:18:54.515Z" }, ] +[[package]] +name = "googleapis-common-protos" +version = "1.72.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e5/7b/adfd75544c415c487b33061fe7ae526165241c1ea133f9a9125a56b39fd8/googleapis_common_protos-1.72.0.tar.gz", hash = "sha256:e55a601c1b32b52d7a3e65f43563e2aa61bcd737998ee672ac9b951cd49319f5", size = 147433, upload-time = "2025-11-06T18:29:24.087Z" } 
+wheels = [ + { url = "https://files.pythonhosted.org/packages/c4/ab/09169d5a4612a5f92490806649ac8d41e3ec9129c636754575b3553f4ea4/googleapis_common_protos-1.72.0-py3-none-any.whl", hash = "sha256:4299c5a82d5ae1a9702ada957347726b167f9f8d1fc352477702a1e851ff4038", size = 297515, upload-time = "2025-11-06T18:29:13.14Z" }, +] + [[package]] name = "h11" version = "0.16.0" @@ -811,6 +832,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, ] +[[package]] +name = "importlib-metadata" +version = "8.7.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/76/66/650a33bd90f786193e4de4b3ad86ea60b53c89b669a5c7be931fac31cdb0/importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000", size = 56641, upload-time = "2025-04-27T15:29:01.736Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/b0/36bd937216ec521246249be3bf9855081de4c5e06a0c9b4219dbeda50373/importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd", size = 27656, upload-time = "2025-04-27T15:29:00.214Z" }, +] + [[package]] name = "iniconfig" version = "2.3.0" @@ -1377,7 +1410,7 @@ dev = [ requires-dist = [ { name = "boto3-stubs", marker = "extra == 'dev'", specifier = ">=1.35.24" }, { name = "mypy", marker = "extra == 'dev'", specifier = ">=1.11.2" }, - { name = "pipelex", extras = ["mistralai", "anthropic", "google", "google-genai", "bedrock", "fal"], specifier = "==0.17.3" }, + { name = "pipelex", extras = ["mistralai", "anthropic", "google", "google-genai", "bedrock", "fal"], editable = "../pipelex" }, { name = "pyright", marker = "extra == 'dev'", specifier = ">=1.1.405" }, { name = "pytest", marker = "extra == 'dev'", specifier = ">=9.0.1" }, { name = "pytest-asyncio", marker = "extra == 'dev'", specifier = ">=0.24.0" }, @@ -1657,6 +1690,88 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/55/4f/dbc0c124c40cb390508a82770fb9f6e3ed162560181a85089191a851c59a/openai-2.8.1-py3-none-any.whl", hash = "sha256:c6c3b5a04994734386e8dad3c00a393f56d3b68a27cd2e8acae91a59e4122463", size = 1022688, upload-time = "2025-11-17T22:39:57.675Z" }, ] +[[package]] +name = "opentelemetry-api" +version = "1.39.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "importlib-metadata" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/97/b9/3161be15bb8e3ad01be8be5a968a9237c3027c5be504362ff800fca3e442/opentelemetry_api-1.39.1.tar.gz", hash = "sha256:fbde8c80e1b937a2c61f20347e91c0c18a1940cecf012d62e65a7caf08967c9c", size = 65767, upload-time = "2025-12-11T13:32:39.182Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cf/df/d3f1ddf4bb4cb50ed9b1139cc7b1c54c34a1e7ce8fd1b9a37c0d1551a6bd/opentelemetry_api-1.39.1-py3-none-any.whl", hash = "sha256:2edd8463432a7f8443edce90972169b195e7d6a05500cd29e6d13898187c9950", size = 66356, upload-time = "2025-12-11T13:32:17.304Z" }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-common" +version = "1.39.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-proto" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/e9/9d/22d241b66f7bbde88a3bfa6847a351d2c46b84de23e71222c6aae25c7050/opentelemetry_exporter_otlp_proto_common-1.39.1.tar.gz", hash = "sha256:763370d4737a59741c89a67b50f9e39271639ee4afc999dadfe768541c027464", size = 20409, upload-time = "2025-12-11T13:32:40.885Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8c/02/ffc3e143d89a27ac21fd557365b98bd0653b98de8a101151d5805b5d4c33/opentelemetry_exporter_otlp_proto_common-1.39.1-py3-none-any.whl", hash = "sha256:08f8a5862d64cc3435105686d0216c1365dc5701f86844a8cd56597d0c764fde", size = 18366, upload-time = "2025-12-11T13:32:20.2Z" }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-http" +version = "1.39.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "googleapis-common-protos" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-common" }, + { name = "opentelemetry-proto" }, + { name = "opentelemetry-sdk" }, + { name = "requests" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/80/04/2a08fa9c0214ae38880df01e8bfae12b067ec0793446578575e5080d6545/opentelemetry_exporter_otlp_proto_http-1.39.1.tar.gz", hash = "sha256:31bdab9745c709ce90a49a0624c2bd445d31a28ba34275951a6a362d16a0b9cb", size = 17288, upload-time = "2025-12-11T13:32:42.029Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/95/f1/b27d3e2e003cd9a3592c43d099d2ed8d0a947c15281bf8463a256db0b46c/opentelemetry_exporter_otlp_proto_http-1.39.1-py3-none-any.whl", hash = "sha256:d9f5207183dd752a412c4cd564ca8875ececba13be6e9c6c370ffb752fd59985", size = 19641, upload-time = "2025-12-11T13:32:22.248Z" }, +] + +[[package]] +name = "opentelemetry-proto" +version = "1.39.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/49/1d/f25d76d8260c156c40c97c9ed4511ec0f9ce353f8108ca6e7561f82a06b2/opentelemetry_proto-1.39.1.tar.gz", hash = "sha256:6c8e05144fc0d3ed4d22c2289c6b126e03bcd0e6a7da0f16cedd2e1c2772e2c8", size = 46152, upload-time = "2025-12-11T13:32:48.681Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/51/95/b40c96a7b5203005a0b03d8ce8cd212ff23f1793d5ba289c87a097571b18/opentelemetry_proto-1.39.1-py3-none-any.whl", hash = "sha256:22cdc78efd3b3765d09e68bfbd010d4fc254c9818afd0b6b423387d9dee46007", size = 72535, upload-time = "2025-12-11T13:32:33.866Z" }, +] + +[[package]] +name = "opentelemetry-sdk" +version = "1.39.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/eb/fb/c76080c9ba07e1e8235d24cdcc4d125ef7aa3edf23eb4e497c2e50889adc/opentelemetry_sdk-1.39.1.tar.gz", hash = "sha256:cf4d4563caf7bff906c9f7967e2be22d0d6b349b908be0d90fb21c8e9c995cc6", size = 171460, upload-time = "2025-12-11T13:32:49.369Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/98/e91cf858f203d86f4eccdf763dcf01cf03f1dae80c3750f7e635bfa206b6/opentelemetry_sdk-1.39.1-py3-none-any.whl", hash = "sha256:4d5482c478513ecb0a5d938dcc61394e647066e0cc2676bee9f3af3f3f45f01c", size = 132565, upload-time = "2025-12-11T13:32:35.069Z" }, +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.60b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { 
name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/91/df/553f93ed38bf22f4b999d9be9c185adb558982214f33eae539d3b5cd0858/opentelemetry_semantic_conventions-0.60b1.tar.gz", hash = "sha256:87c228b5a0669b748c76d76df6c364c369c28f1c465e50f661e39737e84bc953", size = 137935, upload-time = "2025-12-11T13:32:50.487Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7a/5e/5958555e09635d09b75de3c4f8b9cae7335ca545d77392ffe7331534c402/opentelemetry_semantic_conventions-0.60b1-py3-none-any.whl", hash = "sha256:9fa8c8b0c110da289809292b0591220d3a7b53c1526a23021e977d68597893fb", size = 219982, upload-time = "2025-12-11T13:32:36.955Z" }, +] + [[package]] name = "packaging" version = "25.0" @@ -1776,7 +1891,7 @@ wheels = [ [[package]] name = "pipelex" version = "0.17.3" -source = { registry = "https://pypi.org/simple" } +source = { editable = "../pipelex" } dependencies = [ { name = "aiofiles" }, { name = "backports-strenum", marker = "python_full_version < '3.11'" }, @@ -1790,8 +1905,13 @@ dependencies = [ { name = "networkx", version = "3.4.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, { name = "networkx", version = "3.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, { name = "openai" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-http" }, + { name = "opentelemetry-sdk" }, + { name = "opentelemetry-semantic-conventions" }, { name = "pillow" }, { name = "polyfactory" }, + { name = "portkey-ai" }, { name = "posthog" }, { name = "pydantic" }, { name = "pypdfium2" }, @@ -1803,11 +1923,6 @@ dependencies = [ { name = "tomlkit" }, { name = "typer" }, { name = "typing-extensions" }, - { name = "yattag" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/33/69/98c518fc203b3096bf163124d3eeae8b15f8608c28e4292b977bef99bbc1/pipelex-0.17.3.tar.gz", hash = "sha256:9be425de4faee01d1039f8e97c55a2beb66e91b005be13323709f5843b917d64", size = 368295, upload-time = "2025-12-01T13:45:52.849Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/4c/82/1ad4436608bd1c33077d9d0905a0d93640b9c47b310f1c19ae0f96c18224/pipelex-0.17.3-py3-none-any.whl", hash = "sha256:5dda80b9adcfd13433992e2dfd96f484c6339e14511388cb66adf5e22c28cafa", size = 567412, upload-time = "2025-12-01T13:45:51.07Z" }, ] [package.optional-dependencies] @@ -1832,6 +1947,69 @@ mistralai = [ { name = "mistralai" }, ] +[package.metadata] +requires-dist = [ + { name = "aioboto3", marker = "extra == 'bedrock'", specifier = ">=13.4.0" }, + { name = "aiofiles", specifier = ">=23.2.1" }, + { name = "anthropic", marker = "extra == 'anthropic'", specifier = ">=0.60.0" }, + { name = "backports-strenum", marker = "python_full_version < '3.11'", specifier = ">=1.3.0" }, + { name = "boto3", marker = "extra == 'bedrock'", specifier = ">=1.34.131" }, + { name = "boto3-stubs", marker = "extra == 'dev'", specifier = ">=1.35.24" }, + { name = "docling", marker = "extra == 'docling'", specifier = ">=2.64.0" }, + { name = "fal-client", marker = "extra == 'fal'", specifier = ">=0.4.1" }, + { name = "filetype", specifier = ">=1.2.0" }, + { name = "google-auth-oauthlib", marker = "extra == 'google'", specifier = ">=1.2.1" }, + { name = "google-genai", marker = "extra == 'google-genai'" }, + { name = "httpx", specifier = ">=0.23.0,<1.0.0" }, + { name = "instructor", specifier = ">=1.8.3,!=1.11.*,!=1.12.*" }, + { name = "instructor", extras = ["google-genai"], marker = "extra == 
'google-genai'" }, + { name = "jinja2", specifier = ">=3.1.4" }, + { name = "json2html", specifier = ">=1.3.0" }, + { name = "kajson", specifier = "==0.3.1" }, + { name = "markdown", specifier = ">=3.6" }, + { name = "mistralai", marker = "extra == 'mistralai'", specifier = "==1.5.2" }, + { name = "mkdocs", marker = "extra == 'docs'", specifier = "==1.6.1" }, + { name = "mkdocs-glightbox", marker = "extra == 'docs'", specifier = "==0.4.0" }, + { name = "mkdocs-material", marker = "extra == 'docs'", specifier = "==9.6.14" }, + { name = "mkdocs-meta-manager", marker = "extra == 'docs'", specifier = "==1.1.0" }, + { name = "mypy", marker = "extra == 'dev'", specifier = ">=1.11.2" }, + { name = "networkx", specifier = ">=3.4.2" }, + { name = "openai", specifier = ">=1.108.1" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-http" }, + { name = "opentelemetry-sdk" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "pillow", specifier = ">=11.2.1" }, + { name = "polyfactory", specifier = ">=2.21.0" }, + { name = "portkey-ai", specifier = ">=2.1.0" }, + { name = "posthog", specifier = ">=6.7.0" }, + { name = "pydantic", specifier = ">=2.10.6,<3.0.0" }, + { name = "pylint", marker = "extra == 'dev'", specifier = ">=3.3.8" }, + { name = "pypdfium2", specifier = ">=4.30.0,!=4.30.1" }, + { name = "pyright", marker = "extra == 'dev'", specifier = ">=1.1.405" }, + { name = "pytest", marker = "extra == 'dev'", specifier = ">=9.0.1" }, + { name = "pytest-asyncio", marker = "extra == 'dev'", specifier = ">=0.24.0" }, + { name = "pytest-cov", marker = "extra == 'dev'", specifier = ">=6.1.1" }, + { name = "pytest-mock", marker = "extra == 'dev'", specifier = ">=3.14.0" }, + { name = "pytest-sugar", marker = "extra == 'dev'", specifier = ">=1.0.0" }, + { name = "pytest-xdist", marker = "extra == 'dev'", specifier = ">=3.6.1" }, + { name = "python-dotenv", specifier = ">=1.0.1" }, + { name = "pyyaml", specifier = ">=6.0.2" }, + { name = "rich", specifier = ">=13.8.1" }, + { name = "ruff", marker = "extra == 'dev'", specifier = ">=0.6.8" }, + { name = "shortuuid", specifier = ">=1.0.13" }, + { name = "tomli", specifier = ">=2.3.0" }, + { name = "tomlkit", specifier = ">=0.13.2" }, + { name = "typer", specifier = ">=0.16.0" }, + { name = "types-aioboto3", extras = ["bedrock", "bedrock-runtime"], marker = "extra == 'dev'", specifier = ">=13.4.0" }, + { name = "types-aiofiles", marker = "extra == 'dev'", specifier = ">=24.1.0.20240626" }, + { name = "types-markdown", marker = "extra == 'dev'", specifier = ">=3.6.0.20240316" }, + { name = "types-networkx", marker = "extra == 'dev'", specifier = ">=3.3.0.20241020" }, + { name = "types-pyyaml", marker = "extra == 'dev'", specifier = ">=6.0.12.20250326" }, + { name = "typing-extensions", specifier = ">=4.13.2" }, +] +provides-extras = ["anthropic", "bedrock", "docling", "fal", "google", "google-genai", "mistralai", "docs", "dev"] + [[package]] name = "platformdirs" version = "4.5.0" @@ -1863,6 +2041,27 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/94/7c/535646d75a1c510065169ea65693613c7a6bc64491bea13e7dad4f028ff3/polyfactory-3.1.0-py3-none-any.whl", hash = "sha256:78171232342c25906d542513c9f00ebf41eadec2c67b498490a577024dd7e867", size = 61836, upload-time = "2025-11-25T08:10:14.893Z" }, ] +[[package]] +name = "portkey-ai" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "cached-property" }, + { name = "distro" }, + { name = "httpx" 
}, + { name = "jiter" }, + { name = "pydantic" }, + { name = "sniffio" }, + { name = "tqdm" }, + { name = "types-requests" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d4/8a/f5bbaab806ad61d9959cb7c88c639200feacac1b2ba7b455b97a2f216e7c/portkey_ai-2.1.0.tar.gz", hash = "sha256:c2558041c568eef8528737978089301cb9be056f166a683251831cbfa6a623cb", size = 567417, upload-time = "2025-11-25T20:32:43.102Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/11/c585b90ac842027e5f4f7f7cee72d3197f58ff24b6d7c5f1243aa8fa96be/portkey_ai-2.1.0-py3-none-any.whl", hash = "sha256:2166033f8e198745947fee5321d0bbcfb005afc35468bd5a948fa83dc16b6767", size = 1181622, upload-time = "2025-11-25T20:32:41.185Z" }, +] + [[package]] name = "posthog" version = "7.0.1" @@ -2010,6 +2209,21 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5b/5a/bc7b4a4ef808fa59a816c17b20c4bef6884daebbdf627ff2a161da67da19/propcache-0.4.1-py3-none-any.whl", hash = "sha256:af2a6052aeb6cf17d3e46ee169099044fd8224cbaf75c76a2ef596e8163e2237", size = 13305, upload-time = "2025-10-08T19:49:00.792Z" }, ] +[[package]] +name = "protobuf" +version = "6.33.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/34/44/e49ecff446afeec9d1a66d6bbf9adc21e3c7cea7803a920ca3773379d4f6/protobuf-6.33.2.tar.gz", hash = "sha256:56dc370c91fbb8ac85bc13582c9e373569668a290aa2e66a590c2a0d35ddb9e4", size = 444296, upload-time = "2025-12-06T00:17:53.311Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bc/91/1e3a34881a88697a7354ffd177e8746e97a722e5e8db101544b47e84afb1/protobuf-6.33.2-cp310-abi3-win32.whl", hash = "sha256:87eb388bd2d0f78febd8f4c8779c79247b26a5befad525008e49a6955787ff3d", size = 425603, upload-time = "2025-12-06T00:17:41.114Z" }, + { url = "https://files.pythonhosted.org/packages/64/20/4d50191997e917ae13ad0a235c8b42d8c1ab9c3e6fd455ca16d416944355/protobuf-6.33.2-cp310-abi3-win_amd64.whl", hash = "sha256:fc2a0e8b05b180e5fc0dd1559fe8ebdae21a27e81ac77728fb6c42b12c7419b4", size = 436930, upload-time = "2025-12-06T00:17:43.278Z" }, + { url = "https://files.pythonhosted.org/packages/b2/ca/7e485da88ba45c920fb3f50ae78de29ab925d9e54ef0de678306abfbb497/protobuf-6.33.2-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d9b19771ca75935b3a4422957bc518b0cecb978b31d1dd12037b088f6bcc0e43", size = 427621, upload-time = "2025-12-06T00:17:44.445Z" }, + { url = "https://files.pythonhosted.org/packages/7d/4f/f743761e41d3b2b2566748eb76bbff2b43e14d5fcab694f494a16458b05f/protobuf-6.33.2-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:b5d3b5625192214066d99b2b605f5783483575656784de223f00a8d00754fc0e", size = 324460, upload-time = "2025-12-06T00:17:45.678Z" }, + { url = "https://files.pythonhosted.org/packages/b1/fa/26468d00a92824020f6f2090d827078c09c9c587e34cbfd2d0c7911221f8/protobuf-6.33.2-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:8cd7640aee0b7828b6d03ae518b5b4806fdfc1afe8de82f79c3454f8aef29872", size = 339168, upload-time = "2025-12-06T00:17:46.813Z" }, + { url = "https://files.pythonhosted.org/packages/56/13/333b8f421738f149d4fe5e49553bc2a2ab75235486259f689b4b91f96cec/protobuf-6.33.2-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:1f8017c48c07ec5859106533b682260ba3d7c5567b1ca1f24297ce03384d1b4f", size = 323270, upload-time = "2025-12-06T00:17:48.253Z" }, + { url = "https://files.pythonhosted.org/packages/0e/15/4f02896cc3df04fc465010a4c6a0cd89810f54617a32a70ef531ed75d61c/protobuf-6.33.2-py3-none-any.whl", hash 
= "sha256:7636aad9bb01768870266de5dc009de2d1b936771b38a793f73cbbf279c91c5c", size = 170501, upload-time = "2025-12-06T00:17:52.211Z" }, +] + [[package]] name = "pyasn1" version = "0.6.1" @@ -2705,6 +2919,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/bd/e0/1eed384f02555dde685fff1a1ac805c1c7dcb6dd019c916fe659b1c1f9ec/types_pyyaml-6.0.12.20250915-py3-none-any.whl", hash = "sha256:e7d4d9e064e89a3b3cae120b4990cd370874d2bf12fa5f46c97018dd5d3c9ab6", size = 20338, upload-time = "2025-09-15T03:00:59.218Z" }, ] +[[package]] +name = "types-requests" +version = "2.32.4.20250913" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/36/27/489922f4505975b11de2b5ad07b4fe1dca0bca9be81a703f26c5f3acfce5/types_requests-2.32.4.20250913.tar.gz", hash = "sha256:abd6d4f9ce3a9383f269775a9835a4c24e5cd6b9f647d64f88aa4613c33def5d", size = 23113, upload-time = "2025-09-13T02:40:02.309Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/20/9a227ea57c1285986c4cf78400d0a91615d25b24e257fd9e2969606bdfae/types_requests-2.32.4.20250913-py3-none-any.whl", hash = "sha256:78c9c1fffebbe0fa487a418e0fa5252017e9c60d1a2da394077f1780f655d7e1", size = 20658, upload-time = "2025-09-13T02:40:01.115Z" }, +] + [[package]] name = "types-s3transfer" version = "0.15.0" @@ -3036,7 +3262,10 @@ wheels = [ ] [[package]] -name = "yattag" -version = "1.16.1" +name = "zipp" +version = "3.23.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/1c/1a/d3b2a2b8f843f5e7138471c4a5c9172ef62bb41239aa4371784b7448110c/yattag-1.16.1.tar.gz", hash = "sha256:baa8f254e7ea5d3e0618281ad2ff5610e0e5360b3608e695c29bfb3b29d051f4", size = 29069, upload-time = "2024-11-02T22:38:30.443Z" } +sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = "2025-06-08T17:06:38.034Z" }, +] From d79a90a3cb6d21b131862e4e3ef2aedf95a9f380 Mon Sep 17 00:00:00 2001 From: Louis Choquel <8851983+lchoquel@users.noreply.github.com> Date: Mon, 12 Jan 2026 17:07:01 +0100 Subject: [PATCH 02/10] Use feature/Chicago --- .pipelex/inference/backends.toml | 109 +++++ .pipelex/inference/backends/anthropic.toml | 100 +++++ .pipelex/inference/backends/azure_openai.toml | 213 ++++++++++ .pipelex/inference/backends/bedrock.toml | 120 ++++++ .pipelex/inference/backends/blackboxai.toml | 240 +++++++++++ .pipelex/inference/backends/fal.toml | 107 +++++ .pipelex/inference/backends/google.toml | 94 +++++ .pipelex/inference/backends/groq.toml | 129 ++++++ .pipelex/inference/backends/huggingface.toml | 43 ++ .pipelex/inference/backends/internal.toml | 37 ++ .pipelex/inference/backends/mistral.toml | 164 ++++++++ .pipelex/inference/backends/ollama.toml | 63 +++ .pipelex/inference/backends/openai.toml | 208 +++++++++ .../inference/backends/pipelex_gateway.toml | 41 ++ .../inference/backends/pipelex_inference.toml | 205 +++++++++ .pipelex/inference/backends/portkey.toml | 263 ++++++++++++ .pipelex/inference/backends/scaleway.toml | 67 
+++ .pipelex/inference/backends/vertexai.toml | 54 +++ .pipelex/inference/backends/xai.toml | 56 +++ .pipelex/inference/deck/base_deck.toml | 202 +++++++++ .pipelex/inference/deck/overrides.toml | 19 + .pipelex/inference/routing_profiles.toml | 173 ++++++++ .pipelex/pipelex.toml | 162 +++++++ .pipelex/pipelex_service.toml | 19 + .pipelex/telemetry.toml | 92 ++++ Makefile | 6 +- crazy/__init__.py | 0 crazy/bundle.plx | 42 ++ crazy/bundle_view.html | 111 +++++ crazy/bundle_view.svg | 397 ++++++++++++++++++ crazy/inputs.json | 1 + crazy/run_generate_crazy_image.py | 19 + crazy/structures/__init__.py | 0 .../crazy_image_generation_ImagePrompt.py | 22 + my_project/hello_world.py | 2 +- pyproject.toml | 6 +- uv.lock | 164 ++++---- 37 files changed, 3666 insertions(+), 84 deletions(-) create mode 100644 .pipelex/inference/backends.toml create mode 100644 .pipelex/inference/backends/anthropic.toml create mode 100644 .pipelex/inference/backends/azure_openai.toml create mode 100644 .pipelex/inference/backends/bedrock.toml create mode 100644 .pipelex/inference/backends/blackboxai.toml create mode 100644 .pipelex/inference/backends/fal.toml create mode 100644 .pipelex/inference/backends/google.toml create mode 100644 .pipelex/inference/backends/groq.toml create mode 100644 .pipelex/inference/backends/huggingface.toml create mode 100644 .pipelex/inference/backends/internal.toml create mode 100644 .pipelex/inference/backends/mistral.toml create mode 100644 .pipelex/inference/backends/ollama.toml create mode 100644 .pipelex/inference/backends/openai.toml create mode 100644 .pipelex/inference/backends/pipelex_gateway.toml create mode 100644 .pipelex/inference/backends/pipelex_inference.toml create mode 100644 .pipelex/inference/backends/portkey.toml create mode 100644 .pipelex/inference/backends/scaleway.toml create mode 100644 .pipelex/inference/backends/vertexai.toml create mode 100644 .pipelex/inference/backends/xai.toml create mode 100644 .pipelex/inference/deck/base_deck.toml create mode 100644 .pipelex/inference/deck/overrides.toml create mode 100644 .pipelex/inference/routing_profiles.toml create mode 100644 .pipelex/pipelex.toml create mode 100644 .pipelex/pipelex_service.toml create mode 100644 .pipelex/telemetry.toml create mode 100644 crazy/__init__.py create mode 100644 crazy/bundle.plx create mode 100644 crazy/bundle_view.html create mode 100644 crazy/bundle_view.svg create mode 100644 crazy/inputs.json create mode 100644 crazy/run_generate_crazy_image.py create mode 100644 crazy/structures/__init__.py create mode 100644 crazy/structures/crazy_image_generation_ImagePrompt.py diff --git a/.pipelex/inference/backends.toml b/.pipelex/inference/backends.toml new file mode 100644 index 0000000..46cbb79 --- /dev/null +++ b/.pipelex/inference/backends.toml @@ -0,0 +1,109 @@ +#################################################################################################### +# Pipelex Inference Backends Configuration +#################################################################################################### +# +# This file configures the inference backends available to Pipelex. +# Each backend connects to a different AI service provider (OpenAI, Anthropic, Google, etc.). 
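+#
+# Backends reference their credentials through `${VAR_NAME}` placeholders that are
+# resolved from the environment (see the entries below). As a purely illustrative
+# sketch (hypothetical backend name, endpoint and variable), an entry follows this shape:
+#
+#   [my_backend]
+#   display_name = "My Backend"
+#   enabled = true
+#   endpoint = "https://api.example.com/v1"
+#   api_key = "${MY_BACKEND_API_KEY}"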
+#
+# Documentation: https://docs.pipelex.com
+# Support: https://go.pipelex.com/discord
+#
+####################################################################################################
+
+[pipelex_gateway]
+display_name = "⭐ Pipelex Gateway"
+enabled = true # Enable after accepting terms via `pipelex init config`
+api_key = "${PIPELEX_GATEWAY_API_KEY}"
+
+[anthropic]
+enabled = false
+api_key = "${ANTHROPIC_API_KEY}"
+valued_constraints = { max_output_tokens_limit = 8192 }
+
+[azure_openai]
+display_name = "Azure OpenAI"
+enabled = false
+endpoint = "${AZURE_API_BASE}"
+api_key = "${AZURE_API_KEY}"
+api_version = "${AZURE_API_VERSION}"
+
+[bedrock]
+display_name = "Amazon Bedrock"
+enabled = false
+aws_region = "${AWS_REGION}"
+
+[blackboxai]
+display_name = "BlackBox AI"
+enabled = false
+endpoint = "https://api.blackbox.ai/v1"
+api_key = "${BLACKBOX_API_KEY}"
+
+[fal]
+display_name = "FAL"
+enabled = false
+api_key = "${FAL_API_KEY}"
+
+[google]
+display_name = "Google AI"
+enabled = false
+api_key = "${GOOGLE_API_KEY}"
+
+[groq]
+display_name = "Groq"
+enabled = false
+endpoint = "https://api.groq.com/openai/v1"
+api_key = "${GROQ_API_KEY}"
+
+[huggingface]
+display_name = "Hugging Face"
+enabled = false
+api_key = "${HF_TOKEN}"
+
+[mistral]
+display_name = "Mistral AI"
+enabled = false
+api_key = "${MISTRAL_API_KEY}"
+
+[ollama]
+enabled = false
+endpoint = "http://localhost:11434/v1"
+
+[openai]
+display_name = "OpenAI"
+enabled = false
+api_key = "${OPENAI_API_KEY}"
+
+[portkey]
+display_name = "Portkey"
+enabled = false
+endpoint = "https://api.portkey.ai/v1"
+api_key = "${PORTKEY_API_KEY}"
+
+[scaleway]
+display_name = "Scaleway"
+enabled = false
+endpoint = "${SCALEWAY_ENDPOINT}"
+api_key = "${SCALEWAY_API_KEY}"
+
+[vertexai]
+display_name = "Google Vertex AI"
+enabled = false # Disabled because setting it up requires internet access just to get credentials, so it fails in CI sandboxes
+gcp_project_id = "${GCP_PROJECT_ID}"
+gcp_location = "${GCP_LOCATION}"
+gcp_credentials_file_path = "${GCP_CREDENTIALS_FILE_PATH}"
+
+[xai]
+display_name = "xAI"
+enabled = false
+endpoint = "https://api.x.ai/v1"
+api_key = "${XAI_API_KEY}"
+
+[internal] # software-only backend, runs internally, without AI
+enabled = true
+
+# Deprecated
+[pipelex_inference]
+display_name = "🛑 Legacy Pipelex Inference"
+enabled = false
+endpoint = "https://inference.pipelex.com/v1"
+api_key = "${PIPELEX_INFERENCE_API_KEY}"
diff --git a/.pipelex/inference/backends/anthropic.toml b/.pipelex/inference/backends/anthropic.toml
new file mode 100644
index 0000000..0f04f4d
--- /dev/null
+++ b/.pipelex/inference/backends/anthropic.toml
@@ -0,0 +1,100 @@
+################################################################################
+# Anthropic Backend Configuration
+################################################################################
+#
+# This file defines the model specifications for Anthropic Claude models.
+# It contains model definitions for various Claude language models
+# accessible through the Anthropic API.
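+#
+# For illustration, a typical model section (hypothetical name, date and prices)
+# combines the shared [defaults] below with per-model fields:
+#
+#   [claude-example]
+#   model_id = "claude-example-20250101"
+#   max_tokens = 8192
+#   inputs = ["text", "images"]
+#   outputs = ["text", "structured"]
+#   costs = { input = 3.0, output = 15.0 }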
+# +# Configuration structure: +# - Each model is defined in its own section with the model name as the header +# - Headers with dots must be quoted (e.g., ["claude-3.5-sonnet"]) +# - Model costs are in USD per million tokens (input/output) +# +# Documentation: https://docs.pipelex.com +# Support: https://go.pipelex.com/discord +# +################################################################################ + +################################################################################ +# MODEL DEFAULTS +################################################################################ + +[defaults] +model_type = "llm" +sdk = "anthropic" +prompting_target = "anthropic" +structure_method = "instructor/anthropic_tools" + +################################################################################ +# LANGUAGE MODELS +################################################################################ + +# --- Claude 3 Series ---------------------------------------------------------- +[claude-3-haiku] +model_id = "claude-3-haiku-20240307" +max_tokens = 4096 +inputs = ["text", "images"] +outputs = ["text", "structured"] +max_prompt_images = 100 +costs = { input = 0.25, output = 1.25 } + +# --- Claude 3.7 Series -------------------------------------------------------- +["claude-3.7-sonnet"] +model_id = "claude-3-7-sonnet-20250219" +max_tokens = 8192 +inputs = ["text", "images"] +outputs = ["text", "structured"] +max_prompt_images = 100 +costs = { input = 3.0, output = 15.0 } + +# --- Claude 4 Series ---------------------------------------------------------- +[claude-4-sonnet] +model_id = "claude-sonnet-4-20250514" +max_tokens = 64000 +inputs = ["text", "images"] +outputs = ["text", "structured"] +max_prompt_images = 100 +costs = { input = 3.0, output = 15.0 } + +[claude-4-opus] +model_id = "claude-opus-4-20250514" +max_tokens = 32000 +inputs = ["text", "images"] +outputs = ["text", "structured"] +max_prompt_images = 100 +costs = { input = 3.0, output = 15.0 } + +# --- Claude 4.1 Series -------------------------------------------------------- +["claude-4.1-opus"] +model_id = "claude-opus-4-1-20250805" +max_tokens = 32000 +inputs = ["text", "images"] +outputs = ["text", "structured"] +max_prompt_images = 100 +costs = { input = 3.0, output = 15.0 } + +# --- Claude 4.5 Series -------------------------------------------------------- +["claude-4.5-sonnet"] +model_id = "claude-sonnet-4-5-20250929" +max_tokens = 64000 +inputs = ["text", "images"] +outputs = ["text", "structured"] +max_prompt_images = 100 +costs = { input = 3.0, output = 15.0 } + +["claude-4.5-haiku"] +model_id = "claude-haiku-4-5-20251001" +max_tokens = 64000 +inputs = ["text", "images"] +outputs = ["text", "structured"] +max_prompt_images = 100 +costs = { input = 1.0, output = 5.0 } + +["claude-4.5-opus"] +model_id = "claude-opus-4-5-20251101" +max_tokens = 64000 +inputs = ["text", "images"] +outputs = ["text", "structured"] +max_prompt_images = 100 +costs = { input = 5.0, output = 25.0 } diff --git a/.pipelex/inference/backends/azure_openai.toml b/.pipelex/inference/backends/azure_openai.toml new file mode 100644 index 0000000..8a89898 --- /dev/null +++ b/.pipelex/inference/backends/azure_openai.toml @@ -0,0 +1,213 @@ +################################################################################ +# Azure OpenAI Backend Configuration +################################################################################ +# +# This file defines the model specifications for Azure OpenAI models. 
+# It contains model definitions for OpenAI models deployed on Azure +# accessible through the Azure OpenAI API. +# +# Configuration structure: +# - Each model is defined in its own section with the model name as the header +# - Headers with dots must be quoted (e.g., ["gpt-4.1"]) +# - Model costs are in USD per million tokens (input/output) +# +# Documentation: https://docs.pipelex.com +# Support: https://go.pipelex.com/discord +# +################################################################################ + +################################################################################ +# MODEL DEFAULTS +################################################################################ + +[defaults] +model_type = "llm" +sdk = "azure_openai_responses" +prompting_target = "openai" +structure_method = "instructor/openai_responses_tools" + +################################################################################ +# LANGUAGE MODELS +################################################################################ + +# --- GPT-4o Series ------------------------------------------------------------ +[gpt-4o] +model_id = "gpt-4o-2024-11-20" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 2.5, output = 10.0 } + +[gpt-4o-mini] +model_id = "gpt-4o-mini-2024-07-18" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.15, output = 0.6 } + +# --- GPT-4.1 Series ----------------------------------------------------------- +["gpt-4.1"] +model_id = "gpt-4.1-2025-04-14" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 2, output = 8 } + +["gpt-4.1-mini"] +model_id = "gpt-4.1-mini-2025-04-14" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.4, output = 1.6 } + +["gpt-4.1-nano"] +model_id = "gpt-4.1-nano-2025-04-14" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.1, output = 0.4 } + +# --- o Series ---------------------------------------------------------------- +[o1-mini] +model_id = "o1-mini-2024-09-12" +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 3.0, output = 12.0 } +valued_constraints = { fixed_temperature = 1 } + +[o1] +model_id = "o1-2024-12-17" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 15.0, output = 60.0 } +valued_constraints = { fixed_temperature = 1 } + +[o3-mini] +model_id = "o3-mini-2025-01-31" +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 1.1, output = 4.4 } +valued_constraints = { fixed_temperature = 1 } + +[o3] +model_id = "o3-2025-04-16" +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 2, output = 8 } +valued_constraints = { fixed_temperature = 1 } + +# --- GPT-5 Series ------------------------------------------------------------- +[gpt-5-mini] +model_id = "gpt-5-mini-2025-08-07" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.25, output = 2.0 } +valued_constraints = { fixed_temperature = 1 } + +[gpt-5-nano] +model_id = "gpt-5-nano-2025-08-07" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.05, output = 0.4 } +valued_constraints = { fixed_temperature = 1 } + +[gpt-5-chat] +model_id = "gpt-5-chat-2025-08-07" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 1.25, output = 10.0 } +valued_constraints = { fixed_temperature = 1 } + +[gpt-5] +model_id = "gpt-5-2025-08-07" +inputs = ["text", "images"] 
+outputs = ["text", "structured"] +costs = { input = 1.25, output = 10.0 } +valued_constraints = { fixed_temperature = 1 } + +# --- GPT-5.1 Series ------------------------------------------------------------- +["gpt-5.1"] +model_id = "gpt-5.1-2025-11-13" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 1.25, output = 10.0 } +valued_constraints = { fixed_temperature = 1 } + +["gpt-5.1-chat"] +model_id = "gpt-5.1-chat-2025-11-13" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 1.25, output = 10.0 } +valued_constraints = { fixed_temperature = 1 } + +["gpt-5.1-codex"] +model_id = "gpt-5.1-codex-2025-11-13" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 1.25, output = 10.0 } +valued_constraints = { fixed_temperature = 1 } + +# --- GPT-5.2 Series ------------------------------------------------------------- +["gpt-5.2"] +model_id = "gpt-5.2-2025-12-11" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 1.75, output = 14.0 } + +["gpt-5.2-chat"] +model_id = "gpt-5.2-chat-2025-12-11" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 1.25, output = 10.0 } +valued_constraints = { fixed_temperature = 1 } + +################################################################################ +# IMAGE GENERATION MODELS +################################################################################ + +# --- OpenAI Image Generation -------------------------------------------------- +[gpt-image-1] +sdk = "azure_rest_img_gen" +model_type = "img_gen" +model_id = "gpt-image-1-2025-04-15" +inputs = ["text"] +outputs = ["image"] +costs = { input = 10, output = 40 } + +[gpt-image-1.rules] +prompt = "positive_only" +num_images = "gpt" +aspect_ratio = "gpt" +background = "gpt" +inference = "gpt" +safety_checker = "unavailable" +output_format = "gpt" + +[gpt-image-1-mini] +sdk = "azure_rest_img_gen" +model_type = "img_gen" +model_id = "gpt-image-1-mini-2025-10-06" +inputs = ["text"] +outputs = ["image"] +costs = { input = 2.5, output = 8 } + +[gpt-image-1-mini.rules] +prompt = "positive_only" +num_images = "gpt" +aspect_ratio = "gpt" +background = "gpt" +inference = "gpt" +safety_checker = "unavailable" +output_format = "gpt" + +["gpt-image-1.5"] +sdk = "azure_rest_img_gen" +model_type = "img_gen" +model_id = "gpt-image-1.5-2025-12-16" +inputs = ["text"] +outputs = ["image"] +costs = { input = 8, output = 32 } + +["gpt-image-1.5".rules] +prompt = "positive_only" +num_images = "gpt" +aspect_ratio = "gpt" +background = "gpt" +inference = "gpt" +safety_checker = "unavailable" +output_format = "gpt" diff --git a/.pipelex/inference/backends/bedrock.toml b/.pipelex/inference/backends/bedrock.toml new file mode 100644 index 0000000..c4ab176 --- /dev/null +++ b/.pipelex/inference/backends/bedrock.toml @@ -0,0 +1,120 @@ +################################################################################ +# Amazon Bedrock Backend Configuration +################################################################################ +# +# This file defines the model specifications for Amazon Bedrock models. +# It contains model definitions for various language models +# accessible through the Amazon Bedrock service. 
+# +# Configuration structure: +# - Each model is defined in its own section with the model name as the header +# - Headers with dots must be quoted (e.g., ["claude-3.5-sonnet"]) +# - Model costs are in USD per million tokens (input/output) +# +# Documentation: https://docs.pipelex.com +# Support: https://go.pipelex.com/discord +# +################################################################################ + +################################################################################ +# MODEL DEFAULTS +################################################################################ + +[defaults] +model_type = "llm" +sdk = "bedrock_aioboto3" +prompting_target = "anthropic" + +################################################################################ +# LANGUAGE MODELS +################################################################################ + +# --- Mistral Models ----------------------------------------------------------- +[bedrock-mistral-large] +model_id = "mistral.mistral-large-2407-v1:0" +max_tokens = 8192 +inputs = ["text"] +outputs = ["text"] +costs = { input = 4.0, output = 12.0 } + +# --- Meta Llama Models -------------------------------------------------------- +[bedrock-meta-llama-3-3-70b-instruct] +model_id = "us.meta.llama3-3-70b-instruct-v1:0" +max_tokens = 8192 +inputs = ["text"] +outputs = ["text"] +# TODO: find out the actual cost per million tokens for llama3 on bedrock +costs = { input = 3.0, output = 15.0 } + +# --- Amazon Nova Models ------------------------------------------------------- +[bedrock-nova-pro] +model_id = "us.amazon.nova-pro-v1:0" +max_tokens = 5120 +inputs = ["text"] +outputs = ["text"] +# TODO: find out the actual cost per million tokens for nova on bedrock +costs = { input = 3.0, output = 15.0 } + +# --- Claude LLMs -------------------------------------------------------------- +["claude-3.7-sonnet"] +sdk = "bedrock_anthropic" +model_id = "us.anthropic.claude-3-7-sonnet-20250219-v1:0" +max_tokens = 8192 +inputs = ["text", "images"] +outputs = ["text", "structured"] +max_prompt_images = 100 +costs = { input = 3.0, output = 15.0 } + +[claude-4-sonnet] +sdk = "bedrock_anthropic" +model_id = "us.anthropic.claude-sonnet-4-20250514-v1:0" +max_tokens = 64000 +inputs = ["text", "images"] +outputs = ["text", "structured"] +max_prompt_images = 100 +costs = { input = 3.0, output = 15.0 } + +[claude-4-opus] +sdk = "bedrock_anthropic" +model_id = "us.anthropic.claude-opus-4-20250514-v1:0" +max_tokens = 32000 +inputs = ["text", "images"] +outputs = ["text", "structured"] +max_prompt_images = 100 +costs = { input = 3.0, output = 15.0 } + +["claude-4.1-opus"] +sdk = "bedrock_anthropic" +model_id = "us.anthropic.claude-opus-4-1-20250805-v1:0" +max_tokens = 32000 +inputs = ["text", "images"] +outputs = ["text", "structured"] +max_prompt_images = 100 +costs = { input = 3.0, output = 15.0 } + +["claude-4.5-sonnet"] +sdk = "bedrock_anthropic" +model_id = "us.anthropic.claude-sonnet-4-5-20250929-v1:0" +max_tokens = 8192 +inputs = ["text", "images"] +outputs = ["text", "structured"] +max_prompt_images = 100 +costs = { input = 3.0, output = 15.0 } + +["claude-4.5-haiku"] +sdk = "bedrock_anthropic" +model_id = "us.anthropic.claude-haiku-4-5-20251001-v1:0" +max_tokens = 8192 +inputs = ["text", "images"] +outputs = ["text", "structured"] +max_prompt_images = 100 +costs = { input = 1.0, output = 5.0 } + +["claude-4.5-opus"] +sdk = "bedrock_anthropic" +model_id = "global.anthropic.claude-opus-4-5-20251101-v1:0" +max_tokens = 8192 +inputs = ["text", 
"images"] +outputs = ["text", "structured"] +max_prompt_images = 100 +costs = { input = 5.0, output = 25.0 } diff --git a/.pipelex/inference/backends/blackboxai.toml b/.pipelex/inference/backends/blackboxai.toml new file mode 100644 index 0000000..9ee0433 --- /dev/null +++ b/.pipelex/inference/backends/blackboxai.toml @@ -0,0 +1,240 @@ +################################################################################ +# BlackBoxAI Backend Configuration +################################################################################ +# +# This file defines the model specifications for BlackBoxAI models. +# It contains model definitions for various language models from different providers +# accessible through the BlackBoxAI API. +# +# Configuration structure: +# - Each model is defined in its own section with the model name as the header +# - Headers with dots must be quoted (e.g., ["gpt-4.5-preview"]) +# - Model costs are in USD per million tokens (input/output) +# +# Documentation: https://docs.pipelex.com +# Support: https://go.pipelex.com/discord +# +################################################################################ + +################################################################################ +# MODEL DEFAULTS +################################################################################ + +[defaults] +model_type = "llm" +sdk = "openai" +structure_method = "instructor/openai_tools" + +################################################################################ +# LANGUAGE MODELS +################################################################################ + +# --- OpenAI Models ------------------------------------------------------------ +[gpt-4o-mini] +model_id = "blackboxai/openai/gpt-4o-mini" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.15, output = 0.60 } + +[gpt-4o] +model_id = "blackboxai/openai/gpt-4o" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 2.50, output = 10.00 } + +[o1-mini] +model_id = "blackboxai/openai/o1-mini" +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 1.10, output = 4.40 } + +[o4-mini] +model_id = "blackboxai/openai/o4-mini" +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 1.10, output = 4.40 } + +# --- Claude LLMs -------------------------------------------------------------- +["claude-3.5-haiku"] +model_id = "blackboxai/anthropic/claude-3.5-haiku" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.80, output = 4.00 } + +["claude-3.5-sonnet"] +model_id = "blackboxai/anthropic/claude-3.5-sonnet" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 3.00, output = 15.00 } + +["claude-3.7-sonnet"] +model_id = "blackboxai/anthropic/claude-3.7-sonnet" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 3.00, output = 15.00 } + +[claude-opus-4] +model_id = "blackboxai/anthropic/claude-opus-4" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 15.00, output = 75.00 } + +[claude-4-sonnet] +model_id = "blackboxai/anthropic/claude-sonnet-4" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 3.00, output = 15.00 } + +["claude-4.5-sonnet"] +model_id = "blackboxai/anthropic/claude-sonnet-4.5" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.28, output = 1.10 } + +# --- Google Models 
------------------------------------------------------------ +["gemini-2.5-flash"] +model_id = "blackboxai/google/gemini-2.5-flash" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.30, output = 2.50 } + +["gemini-2.5-pro"] +model_id = "blackboxai/google/gemini-2.5-pro" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 1.25, output = 10.00 } + +["gemini-flash-1.5-8b"] +model_id = "blackboxai/google/gemini-flash-1.5-8b" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.04, output = 0.15 } + +# --- Mistral Models ----------------------------------------------------------- +[mistral-large] +model_id = "blackboxai/mistralai/mistral-large" +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 2.00, output = 6.00 } + +[pixtral-large-2411] +model_id = "blackboxai/mistralai/pixtral-large-2411" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 2.00, output = 6.00 } + +# --- Meta Llama Models -------------------------------------------------------- +["llama-3.3-70b-instruct"] +model_id = "blackboxai/meta-llama/llama-3.3-70b-instruct" +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 0.04, output = 0.12 } + +["llama-3.2-11b-vision-instruct"] +model_id = "blackboxai/meta-llama/llama-3.2-11b-vision-instruct" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.05, output = 0.05 } + +# --- Qwen Models -------------------------------------------------------------- +["qwen-2.5-72b-instruct"] +model_id = "blackboxai/qwen/qwen-2.5-72b-instruct" +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 0.12, output = 0.39 } + +["qwen2.5-vl-72b-instruct"] +model_id = "blackboxai/qwen/qwen2.5-vl-72b-instruct" +inputs = ["text", "images"] +outputs = ["text"] +costs = { input = 0.25, output = 0.75 } + +# --- Amazon Nova Models ------------------------------------------------------- +[nova-micro-v1] +model_id = "blackboxai/amazon/nova-micro-v1" +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 0.04, output = 0.14 } + +[nova-lite-v1] +model_id = "blackboxai/amazon/nova-lite-v1" +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 0.06, output = 0.24 } + +################################################################################ +# FREE MODELS +################################################################################ + +# --- DeepSeek Free Models ----------------------------------------------------- +[deepseek-chat] +model_id = "blackboxai/deepseek/deepseek-chat:free" +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 0.00, output = 0.00 } + +[deepseek-r1] +model_id = "blackboxai/deepseek/deepseek-r1:free" +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 0.00, output = 0.00 } + +# --- Meta Llama Free Models --------------------------------------------------- +["llama-3.3-70b-instruct-free"] +model_id = "blackboxai/meta-llama/llama-3.3-70b-instruct:free" +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 0.00, output = 0.00 } + + +################################################################################ +# IMAGE GENERATION MODELS +################################################################################ + +[flux-pro] +model_type = "img_gen" +sdk = "blackboxai_img_gen" +model_id = "blackboxai/black-forest-labs/flux-pro" +inputs = ["text"] +outputs = ["image"] +costs = { 
input = 0.0, output = 0.04 } + +["flux-pro/v1.1"] +model_type = "img_gen" +sdk = "blackboxai_img_gen" +model_id = "blackboxai/black-forest-labs/flux-1.1-pro" +inputs = ["text"] +outputs = ["image"] +costs = { input = 0.0, output = 0.04 } + +["flux-pro/v1.1-ultra"] +model_type = "img_gen" +sdk = "blackboxai_img_gen" +model_id = "blackboxai/black-forest-labs/flux-1.1-pro-ultra" +inputs = ["text"] +outputs = ["image"] +costs = { input = 0.0, output = 0.06 } + +[fast-lightning-sdxl] +model_type = "img_gen" +sdk = "blackboxai_img_gen" +model_id = "blackboxai/bytedance/sdxl-lightning-4step" +inputs = ["text"] +outputs = ["image"] +costs = { input = 0.0, output = 0.0014 } + +[nano-banana] +model_type = "img_gen" +sdk = "blackboxai_img_gen" +model_id = "blackboxai/google/nano-banana" +inputs = ["text"] +outputs = ["image"] +costs = { input = 0.0, output = 0.039 } + +[nano-banana-pro] +model_type = "img_gen" +sdk = "blackboxai_img_gen" +model_id = "blackboxai/google/nano-banana-pro" +inputs = ["text"] +outputs = ["image"] +costs = { input = 0.0, output = 0.039 } diff --git a/.pipelex/inference/backends/fal.toml b/.pipelex/inference/backends/fal.toml new file mode 100644 index 0000000..3433f99 --- /dev/null +++ b/.pipelex/inference/backends/fal.toml @@ -0,0 +1,107 @@ +################################################################################ +# FAL Backend Configuration +################################################################################ +# +# This file defines the model specifications for FAL (Fast AI Labs) models. +# It contains model definitions for various image generation models +# accessible through the FAL API. +# +# Configuration structure: +# - Each model is defined in its own section with the model name as the header +# - Headers with dots must be quoted (e.g., ["flux-pro/v1.1"]) +# - Model costs are in USD per million tokens (input/output) +# +# Documentation: https://docs.pipelex.com +# Support: https://go.pipelex.com/discord +# +################################################################################ + +################################################################################ +# MODEL DEFAULTS +################################################################################ + +[defaults] +model_type = "img_gen" +sdk = "fal" +prompting_target = "fal" + +################################################################################ +# IMAGE GENERATION MODELS +################################################################################ + +# --- Flux Pro Series ---------------------------------------------------------- +[flux-pro] +model_id = "fal-ai/flux-pro" +inputs = ["text"] +outputs = ["image"] +costs = { input = 0.05, output = 0.0 } + +[flux-pro.rules] +prompt = "positive_only" +num_images = "fal" +aspect_ratio = "flux" +inference = "flux" +safety_checker = "available" +output_format = "flux_1" +specific = "fal" + +["flux-pro/v1.1"] +model_id = "fal-ai/flux-pro/v1.1" +inputs = ["text"] +outputs = ["image"] +costs = { input = 0.05, output = 0.0 } + +["flux-pro/v1.1".rules] +prompt = "positive_only" +num_images = "fal" +aspect_ratio = "flux" +inference = "flux" +safety_checker = "available" +output_format = "flux_1" +specific = "fal" + +["flux-pro/v1.1-ultra"] +model_id = "fal-ai/flux-pro/v1.1-ultra" +inputs = ["text"] +outputs = ["image"] +costs = { input = 0.06, output = 0.0 } + +["flux-pro/v1.1-ultra".rules] +prompt = "positive_only" +num_images = "fal" +aspect_ratio = "flux_11_ultra" +inference = "flux_11_ultra" +safety_checker = "available" 
+output_format = "flux_1" +specific = "fal" + +[flux-2] +model_id = "fal-ai/flux-2" +inputs = ["text"] +outputs = ["image"] +costs = { input = 0.05, output = 0.0 } + +[flux-2.rules] +prompt = "positive_only" +num_images = "fal" +aspect_ratio = "flux" +inference = "flux" +safety_checker = "available" +output_format = "flux_2" +specific = "fal" + +# --- SDXL models -------------------------------------------------------------- +[fast-lightning-sdxl] +model_id = "fal-ai/fast-lightning-sdxl" +inputs = ["text"] +outputs = ["image"] +costs = { input = 0.0003, output = 0.0 } + +[fast-lightning-sdxl.rules] +prompt = "positive_only" +num_images = "fal" +aspect_ratio = "flux" +inference = "sdxl_lightning" +safety_checker = "unavailable" +output_format = "sdxl" +specific = "fal" diff --git a/.pipelex/inference/backends/google.toml b/.pipelex/inference/backends/google.toml new file mode 100644 index 0000000..36e19c2 --- /dev/null +++ b/.pipelex/inference/backends/google.toml @@ -0,0 +1,94 @@ +################################################################################ +# Google Gemini API Backend Configuration +################################################################################ +# +# This file defines the model specifications for Google Gemini API models. +# It contains model definitions for Gemini language models +# accessible through the Google Gemini API (not VertexAI). +# +# Configuration structure: +# - Each model is defined in its own section with the model name as the header +# - Headers with dots must be quoted (e.g., ["gemini-2.0-flash"]) +# - Model costs are in USD per million tokens (input/output) +# +# Documentation: https://docs.pipelex.com +# Support: https://go.pipelex.com/discord +# +################################################################################ + +################################################################################ +# MODEL DEFAULTS +################################################################################ + +[defaults] +model_type = "llm" +sdk = "google" +prompting_target = "gemini" +structure_method = "instructor/genai_tools" + +################################################################################ +# LANGUAGE MODELS +################################################################################ + +# --- Gemini 2.0 Series ---------------------------------------- +["gemini-2.0-flash"] +model_id = "gemini-2.0-flash" +inputs = ["text", "images"] +outputs = ["text", "structured"] +max_prompt_images = 3000 +costs = { input = 0.10, output = 0.40 } + +# --- Gemini 2.5 Series ---------------------------------------- +["gemini-2.5-pro"] +model_id = "gemini-2.5-pro" +inputs = ["text", "images"] +outputs = ["text", "structured"] +max_prompt_images = 3000 +costs = { input = 1.25, output = 10.0 } + +["gemini-2.5-flash"] +model_id = "gemini-2.5-flash" +inputs = ["text", "images"] +outputs = ["text", "structured"] +max_prompt_images = 3000 +costs = { input = 0.30, output = 2.50 } + +["gemini-2.5-flash-lite"] +model_id = "gemini-2.5-flash-lite" +inputs = ["text", "images"] +outputs = ["text", "structured"] +max_prompt_images = 3000 +costs = { input = 0.10, output = 0.40 } + +# --- Gemini 3.0 Series ---------------------------------------- +["gemini-3.0-pro"] +model_id = "gemini-3-pro-preview" +inputs = ["text", "images"] +outputs = ["text", "structured"] +max_prompt_images = 3000 +costs = { input = 2, output = 12.0 } + +["gemini-3.0-flash-preview"] +model_id = "gemini-3-flash-preview" +inputs = ["text", "images"] +outputs = ["text", 
"structured"] +max_prompt_images = 3000 +costs = { input = 0.5, output = 3.0 } + +################################################################################ +# IMAGE GENERATION MODELS (Nano Banana) +################################################################################ + +[nano-banana] +model_type = "img_gen" +model_id = "gemini-2.5-flash-image" +inputs = ["text"] +outputs = ["image"] +costs = { input = 0.0, output = 0.039 } + +[nano-banana-pro] +model_type = "img_gen" +model_id = "gemini-3-pro-image-preview" +inputs = ["text"] +outputs = ["image"] +costs = { input = 0.0, output = 0.039 } diff --git a/.pipelex/inference/backends/groq.toml b/.pipelex/inference/backends/groq.toml new file mode 100644 index 0000000..72bdae3 --- /dev/null +++ b/.pipelex/inference/backends/groq.toml @@ -0,0 +1,129 @@ +################################################################################ +# Groq Backend Configuration +################################################################################ +# +# This file defines the model specifications for Groq models. +# It contains model definitions for various LLM models accessible through +# the Groq API, including text-only and vision-capable models. +# +# Configuration structure: +# - Each model is defined in its own section with the model name as the header +# - Headers with dots or slashes must be quoted (e.g., ["meta-llama/llama-4-scout"]) +# - Model costs are in USD per million tokens (input/output) +# - Vision models support max 5 images per request, 33MP max resolution +# +# Documentation: https://docs.pipelex.com +# Support: https://go.pipelex.com/discord +# +################################################################################ + +################################################################################ +# MODEL DEFAULTS +################################################################################ + +[defaults] +model_type = "llm" +sdk = "openai" +structure_method = "instructor/json" + +################################################################################ +# PRODUCTION TEXT MODELS +################################################################################ + +# --- Meta Llama 3.x Series ---------------------------------------------------- +["llama-3.1-8b-instant"] +model_id = "llama-3.1-8b-instant" +max_tokens = 131072 +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 0.05, output = 0.08 } + +["llama-3.3-70b-versatile"] +model_id = "llama-3.3-70b-versatile" +max_tokens = 32768 +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 0.59, output = 0.79 } + +# --- Meta Llama Guard --------------------------------------------------------- +[llama-guard-4-12b] +model_id = "meta-llama/llama-guard-4-12b" +max_tokens = 1024 +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 0.20, output = 0.20 } + +# --- OpenAI GPT-OSS Models ---------------------------------------------------- +[gpt-oss-20b] +model_id = "openai/gpt-oss-20b" +max_tokens = 65536 +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 0.075, output = 0.30 } + +[gpt-oss-120b] +model_id = "openai/gpt-oss-120b" +max_tokens = 65536 +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 0.15, output = 0.60 } + +# --- Groq Compound Systems ---------------------------------------------------- +["groq/compound"] +model_id = "groq/compound" +max_tokens = 8192 +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 0.15, output = 0.45 } + 
+["groq/compound-mini"] +model_id = "groq/compound-mini" +max_tokens = 8192 +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 0.10, output = 0.30 } + +################################################################################ +# PREVIEW MODELS +################################################################################ + +# --- Meta Llama 4 Vision Models (Preview) ------------------------------------- +[llama-4-scout-17b-16e-instruct] +model_id = "meta-llama/llama-4-scout-17b-16e-instruct" +max_tokens = 8192 +inputs = ["text", "images"] +outputs = ["text", "structured"] +max_prompt_images = 5 +costs = { input = 0.11, output = 0.34 } + +[llama-4-maverick-17b-128e-instruct] +model_id = "meta-llama/llama-4-maverick-17b-128e-instruct" +max_tokens = 8192 +inputs = ["text", "images"] +outputs = ["text", "structured"] +max_prompt_images = 5 +costs = { input = 0.20, output = 0.60 } + +# --- Moonshot Kimi K2 --------------------------------------------------------- +[kimi-k2-instruct-0905] +model_id = "moonshotai/kimi-k2-instruct-0905" +max_tokens = 16384 +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 1.00, output = 3.00 } + +# --- OpenAI Safety Model ------------------------------------------------------ +[gpt-oss-safeguard-20b] +model_id = "openai/gpt-oss-safeguard-20b" +max_tokens = 65536 +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 0.075, output = 0.30 } + +# --- Qwen 3 ------------------------------------------------------------------- +[qwen3-32b] +model_id = "qwen/qwen3-32b" +max_tokens = 40960 +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 0.29, output = 0.59 } diff --git a/.pipelex/inference/backends/huggingface.toml b/.pipelex/inference/backends/huggingface.toml new file mode 100644 index 0000000..1a79638 --- /dev/null +++ b/.pipelex/inference/backends/huggingface.toml @@ -0,0 +1,43 @@ +################################################################################ +# Hugging Face Backend Configuration +################################################################################ +# +# This file defines the model specifications for Hugging Face models. +# It contains model definitions for various image generation models +# accessible through the Hugging Face Inference API with provider="auto". 
+# +# Configuration structure: +# - Each model is defined in its own section with the model name as the header +# - Headers with dots or slashes must be quoted (e.g., ["stabilityai/stable-diffusion-2-1"]) +# - Model costs are in USD per million tokens (input/output) +# +# Documentation: https://docs.pipelex.com +# Support: https://go.pipelex.com/discord +# +################################################################################ + +################################################################################ +# MODEL DEFAULTS +################################################################################ + +[defaults] +model_type = "img_gen" +sdk = "huggingface_img_gen" + +################################################################################ +# IMAGE GENERATION MODELS +################################################################################ + +# --- Qwen Image Models -------------------------------------------------- +[qwen-image] +model_id = "Qwen/Qwen-Image" +inputs = ["text"] +outputs = ["image"] +costs = { input = 0.0, output = 0.0 } +variant = "fal-ai" +# variant = "replicate" + +[qwen-image.rules] +prompt = "with_negative" +aspect_ratio = "qwen_image" +inference = "qwen_image" diff --git a/.pipelex/inference/backends/internal.toml b/.pipelex/inference/backends/internal.toml new file mode 100644 index 0000000..e44b222 --- /dev/null +++ b/.pipelex/inference/backends/internal.toml @@ -0,0 +1,37 @@ +################################################################################ +# Internal Backend Configuration +################################################################################ +# +# This file defines the model specifications for internal software-only models. +# These models run internally without external APIs or AI services. +# +# Configuration structure: +# - Each model is defined in its own section with the model name as the header +# - Model costs are in USD per million tokens (input/output) +# +# Documentation: https://docs.pipelex.com +# Support: https://go.pipelex.com/discord +# +################################################################################ + +################################################################################ +# TEXT EXTRACTION MODELS +################################################################################ + +# --- PyPDFium2 Text Extractor ------------------------------------------------- +[pypdfium2-extract-pdf] +model_type = "text_extractor" +sdk = "pypdfium2" +model_id = "extract-text" +inputs = ["pdf"] +outputs = ["pages"] +costs = {} + +# --- Docling Text Extractor --------------------------------------------------- +[docling-extract-text] +model_type = "text_extractor" +sdk = "docling_sdk" +model_id = "extract-text" +inputs = ["pdf", "image"] +outputs = ["pages"] +costs = {} diff --git a/.pipelex/inference/backends/mistral.toml b/.pipelex/inference/backends/mistral.toml new file mode 100644 index 0000000..b695ac7 --- /dev/null +++ b/.pipelex/inference/backends/mistral.toml @@ -0,0 +1,164 @@ +################################################################################ +# Mistral Backend Configuration +################################################################################ +# +# This file defines the model specifications for Mistral AI models. +# It contains model definitions for various Mistral language models and specialized models +# accessible through the Mistral API. 
+#
+# Configuration structure:
+# - Each model is defined in its own section with the model name as the header
+# - Headers with dots must be quoted
+# - Model costs are in USD per million tokens (input/output)
+#
+# Documentation: https://docs.pipelex.com
+# Support: https://go.pipelex.com/discord
+#
+################################################################################
+
+################################################################################
+# MODEL DEFAULTS
+################################################################################
+
+[defaults]
+model_type = "llm"
+sdk = "mistral"
+prompting_target = "mistral"
+structure_method = "instructor/mistral_tools"
+
+################################################################################
+# LANGUAGE MODELS
+################################################################################
+
+# --- Ministral Series ---------------------------------------------------------
+[ministral-3b]
+model_id = "ministral-3b-latest"
+max_tokens = 131072
+inputs = ["text"]
+outputs = ["text", "structured"]
+costs = { input = 0.04, output = 0.04 }
+
+[ministral-8b]
+model_id = "ministral-8b-latest"
+max_tokens = 131072
+inputs = ["text"]
+outputs = ["text", "structured"]
+costs = { input = 0.1, output = 0.1 }
+
+# --- Mistral 7B Series --------------------------------------------------------
+[mistral-7b-2312]
+model_id = "open-mistral-7b"
+max_tokens = 32768
+inputs = ["text"]
+outputs = ["text", "structured"]
+costs = { input = 0.25, output = 0.25 }
+
+# --- Mistral 8x7B Series ------------------------------------------------------
+[mistral-8x7b-2312]
+model_id = "open-mixtral-8x7b"
+max_tokens = 32768
+inputs = ["text"]
+outputs = ["text"]
+costs = { input = 0.7, output = 0.7 }
+
+# --- Mistral Codestral Series -------------------------------------------------
+[mistral-codestral-2405]
+model_id = "codestral-2405"
+max_tokens = 262144
+inputs = ["text"]
+outputs = ["text"]
+costs = { input = 1.0, output = 3.0 }
+
+# --- Mistral Large Series -----------------------------------------------------
+[mistral-large-2402]
+model_id = "mistral-large-2402"
+max_tokens = 32768
+inputs = ["text"]
+outputs = ["text", "structured"]
+costs = { input = 4.0, output = 12.0 }
+
+[mistral-large]
+model_id = "mistral-large-latest"
+max_tokens = 131072
+inputs = ["text"]
+outputs = ["text", "structured"]
+costs = { input = 4.0, output = 12.0 }
+
+# --- Mistral Small Series -----------------------------------------------------
+[mistral-small-2402]
+model_id = "mistral-small-2402"
+max_tokens = 32768
+inputs = ["text"]
+outputs = ["text", "structured"]
+costs = { input = 1.0, output = 3.0 }
+
+[mistral-small]
+model_id = "mistral-small-latest"
+max_tokens = 32768
+inputs = ["text"]
+outputs = ["text", "structured"]
+costs = { input = 1.0, output = 3.0 }
+
+# --- Pixtral Series -----------------------------------------------------------
+[pixtral-12b]
+model_id = "pixtral-12b-latest"
+max_tokens = 131072
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+costs = { input = 0.15, output = 0.15 }
+
+[pixtral-large]
+model_id = "pixtral-large-latest"
+max_tokens = 131072
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+costs = { input = 2.0, output = 6.0 }
+
+# --- Mistral Medium Series ----------------------------------------------------
+[mistral-medium]
+model_id = "mistral-medium-latest"
+max_tokens = 128000
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+costs = { input
= 0.4, output = 2.0 }
+
+[mistral-medium-2508]
+model_id = "mistral-medium-2508"
+max_tokens = 128000
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+costs = { input = 0.4, output = 2.0 }
+
+################################################################################
+# EXTRACTION MODELS
+################################################################################
+
+# TODO: add support for per-page pricing
+
+[mistral-ocr-2503]
+model_type = "text_extractor"
+model_id = "mistral-ocr-2503"
+max_tokens = 16384
+inputs = ["pdf", "image"]
+outputs = ["pages"]
+
+[mistral-ocr-2505]
+model_type = "text_extractor"
+model_id = "mistral-ocr-2505"
+max_tokens = 16384
+inputs = ["pdf", "image"]
+outputs = ["pages"]
+
+[mistral-ocr-2512]
+model_type = "text_extractor"
+model_id = "mistral-ocr-2512"
+max_tokens = 16384
+inputs = ["pdf", "image"]
+outputs = ["pages"]
+
+[mistral-ocr]
+model_type = "text_extractor"
+model_id = "mistral-ocr-latest"
+max_tokens = 16384
+inputs = ["pdf", "image"]
+outputs = ["pages"]
diff --git a/.pipelex/inference/backends/ollama.toml b/.pipelex/inference/backends/ollama.toml
new file mode 100644
index 0000000..397e9ac
--- /dev/null
+++ b/.pipelex/inference/backends/ollama.toml
@@ -0,0 +1,63 @@
+################################################################################
+# Ollama Backend Configuration
+################################################################################
+#
+# This file defines the model specifications for Ollama models.
+# It contains model definitions for local language models
+# accessible through the Ollama API.
+#
+# Configuration structure:
+# - Each model is defined in its own section with the model name as the header
+# - Headers with dots must be quoted (e.g., ["mistral-small3.1-24b"])
+# - Model costs are in USD per million tokens (input/output)
+#
+# Documentation: https://docs.pipelex.com
+# Support: https://go.pipelex.com/discord
+#
+################################################################################
+
+################################################################################
+# MODEL DEFAULTS
+################################################################################
+
+[defaults]
+model_type = "llm"
+sdk = "openai"
+prompting_target = "anthropic"
+structure_method = "instructor/openai_tools"
+
+################################################################################
+# LANGUAGE MODELS
+################################################################################
+
+# --- Gemma Models -------------------------------------------------------------
+[gemma3-4b]
+model_id = "gemma3:4b"
+inputs = ["text"]
+outputs = ["text"]
+max_prompt_images = 3000
+costs = { input = 0, output = 0 }
+
+# --- Llama Models -------------------------------------------------------------
+[llama4-scout]
+model_id = "llama4:scout"
+inputs = ["text"]
+outputs = ["text"]
+max_prompt_images = 3000
+costs = { input = 0, output = 0 }
+
+# --- Mistral Models -----------------------------------------------------------
+["mistral-small3.1-24b"]
+model_id = "mistral-small3.1:24b"
+inputs = ["text"]
+outputs = ["text"]
+max_prompt_images = 3000
+costs = { input = 0, output = 0 }
+
+# --- Qwen Models --------------------------------------------------------------
+[qwen3-8b]
+model_id = "qwen3:8b"
+inputs = ["text"]
+outputs = ["text"]
+costs = { input = 0, output = 0 }
+# TODO: support tokens
diff --git a/.pipelex/inference/backends/openai.toml b/.pipelex/inference/backends/openai.toml
new file mode
100644 index 0000000..e61d52e --- /dev/null +++ b/.pipelex/inference/backends/openai.toml @@ -0,0 +1,208 @@ +################################################################################ +# OpenAI Backend Configuration +################################################################################ +# +# This file defines the model specifications for OpenAI models. +# It contains model definitions for various LLM and image generation models +# accessible through the OpenAI API. +# +# Configuration structure: +# - Each model is defined in its own section with the model name as the header +# - Headers with dots must be quoted (e.g., ["gpt-4.1"]) +# - Model costs are in USD per million tokens (input/output) +# +# Documentation: https://docs.pipelex.com +# Support: https://go.pipelex.com/discord +# +################################################################################ + +################################################################################ +# MODEL DEFAULTS +################################################################################ + +[defaults] +model_type = "llm" +sdk = "openai_responses" +prompting_target = "openai" +structure_method = "instructor/openai_responses_tools" + +################################################################################ +# LANGUAGE MODELS +################################################################################ + +# --- GPT-3.5 Series ----------------------------------------------------------- +["gpt-3.5-turbo"] +model_id = "gpt-3.5-turbo-1106" +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 0.5, output = 1.5 } + +# --- GPT-4 Series ------------------------------------------------------------- +[gpt-4] +inputs = ["text"] +outputs = ["text"] +costs = { input = 30.0, output = 60.0 } + +[gpt-4-turbo] +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 10.0, output = 30.0 } + +# --- GPT-4o Series ------------------------------------------------------------ +[gpt-4o-2024-11-20] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 2.5, output = 10.0 } + +[gpt-4o] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 2.5, output = 10.0 } + +[gpt-4o-mini-2024-07-18] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.15, output = 0.6 } + +[gpt-4o-mini] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.15, output = 0.6 } + +# --- GPT-4.1 Series ----------------------------------------------------------- +["gpt-4.1"] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 2, output = 8 } + +["gpt-4.1-mini"] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.4, output = 1.6 } + +["gpt-4.1-nano"] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.1, output = 0.4 } + +# --- o Series ---------------------------------------------------------------- +[o1] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 15.0, output = 60.0 } +valued_constraints = { fixed_temperature = 1 } + +[o3-mini] +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 1.1, output = 4.4 } +valued_constraints = { fixed_temperature = 1 } + +[o3] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 10.0, output = 40.0 } +valued_constraints = { fixed_temperature = 1 } + +[o4-mini] +inputs = ["text"] +outputs = ["text", "structured"] 
+costs = { input = 1.1, output = 4.4 } +valued_constraints = { fixed_temperature = 1 } + +# --- GPT-5 Series ------------------------------------------------------------- +[gpt-5] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 1.25, output = 10.0 } +valued_constraints = { fixed_temperature = 1 } + +[gpt-5-mini] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.25, output = 2.0 } +valued_constraints = { fixed_temperature = 1 } + +[gpt-5-nano] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.05, output = 0.4 } +valued_constraints = { fixed_temperature = 1 } + +[gpt-5-chat] +model_id = "gpt-5-chat-latest" +inputs = ["text", "images"] +outputs = ["text"] +costs = { input = 1.25, output = 10.0 } +valued_constraints = { fixed_temperature = 1 } + +[gpt-5-codex] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 1.25, output = 10.0 } +valued_constraints = { fixed_temperature = 1 } + +# --- GPT-5.1 Series ------------------------------------------------------------- +["gpt-5.1"] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 1.25, output = 10.0 } + +["gpt-5.1-chat"] +model_id = "gpt-5.1-chat-latest" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 1.25, output = 10.0 } +valued_constraints = { fixed_temperature = 1 } + +["gpt-5.1-codex"] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 1.25, output = 10.0 } +valued_constraints = { fixed_temperature = 1 } + +["gpt-5.1-codex-max"] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 1.25, output = 10.0 } +valued_constraints = { fixed_temperature = 1 } + +# --- GPT-5.2 Series ------------------------------------------------------------- +["gpt-5.2"] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 1.75, output = 14.0 } + +["gpt-5.2-chat"] +model_id = "gpt-5.2-chat-latest" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 1.75, output = 14.0 } +valued_constraints = { fixed_temperature = 1 } + +################################################################################ +# IMAGE GENERATION MODELS +################################################################################ + +# --- OpenAI Image Generation -------------------------------------------------- +[gpt-image-1] +sdk = "openai_img_gen" +model_type = "img_gen" +inputs = ["text"] +outputs = ["image"] +costs = { input = 10, output = 40 } + +[gpt-image-1-mini] +sdk = "openai_img_gen" +model_type = "img_gen" +inputs = ["text"] +outputs = ["image"] +costs = { input = 2.5, output = 8 } + +["gpt-image-1.5"] +sdk = "openai_img_gen" +model_type = "img_gen" +model_id = "gpt-image-1.5" +inputs = ["text"] +outputs = ["image"] +costs = { input = 8, output = 32 } diff --git a/.pipelex/inference/backends/pipelex_gateway.toml b/.pipelex/inference/backends/pipelex_gateway.toml new file mode 100644 index 0000000..bca075b --- /dev/null +++ b/.pipelex/inference/backends/pipelex_gateway.toml @@ -0,0 +1,41 @@ +################################################################################ +# Pipelex Gateway Local Overrides +################################################################################ +# +# TELEMETRY NOTICE: +# +# Using Pipelex Gateway enables identified telemetry tied to your API key +# (hashed for security). This is independent from your telemetry.toml settings. 
+# +# We collect only technical data (model names, token counts, latency, error rates). +# We do NOT collect prompts, completions, pipe codes, or business data. +# +# This allows us to monitor service quality, enforce fair usage, and support you. +# +################################################################################ +# +# WARNING: USE AT YOUR OWN RISK! +# +# The actual model configuration is fetched remotely from Pipelex servers. +# Any override in this file may cause unexpected behavior or failures, +# as the remote configuration may change at any time. +# +# If you must override, you may ONLY use these keys per model: +# - sdk +# - structure_method +# +# All other keys will be ignored. +# +# If you need custom configurations, consider using your own API keys +# with direct provider backends (openai, anthropic, etc.) instead. +# +# Documentation: +# https://docs.pipelex.com/home/7-configuration/config-technical/inference-backend-config/ +# Support: https://go.pipelex.com/discord +# +################################################################################ + +# Per-model overrides example: +# [gpt-4o] +# sdk = "gateway_completions" +# structure_method = "instructor/openai_tools" diff --git a/.pipelex/inference/backends/pipelex_inference.toml b/.pipelex/inference/backends/pipelex_inference.toml new file mode 100644 index 0000000..751c570 --- /dev/null +++ b/.pipelex/inference/backends/pipelex_inference.toml @@ -0,0 +1,205 @@ +################################################################################ +# Pipelex Inference Backend Configuration +################################################################################ +# +# This file defines the model specifications for the Pipelex Inference backend. +# It contains model definitions for various LLM and image generation models +# accessible through the Pipelex unified inference API. 
+# +# Configuration structure: +# - Each model is defined in its own section with the model name as the header +# - Headers with dots must be quoted (e.g., ["gpt-4.1"]) +# - Model costs are in USD per million tokens (input/output) +# +# Documentation: https://docs.pipelex.com +# Support: https://go.pipelex.com/discord +# +################################################################################ + +################################################################################ +# MODEL DEFAULTS +################################################################################ + +[defaults] +model_type = "llm" +sdk = "openai" +prompting_target = "anthropic" +structure_method = "instructor/openai_tools" + +################################################################################ +# LANGUAGE MODELS +################################################################################ + +# --- OpenAI LLMs -------------------------------------------------------------- +[gpt-4o] +model_id = "pipelex/gpt-4o" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 2.75, output = 11.00 } +sdk = "openai_responses" +structure_method = "instructor/openai_responses_tools" + +[gpt-4o-mini] +model_id = "pipelex/gpt-4o-mini" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.17, output = 0.66 } +sdk = "openai_responses" +structure_method = "instructor/openai_responses_tools" + +["gpt-4.1"] +model_id = "pipelex/gpt-4.1" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 2, output = 8 } +sdk = "openai_responses" +structure_method = "instructor/openai_responses_tools" + +["gpt-4.1-mini"] +model_id = "pipelex/gpt-4.1-mini" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.4, output = 1.6 } +sdk = "openai_responses" +structure_method = "instructor/openai_responses_tools" + +["gpt-4.1-nano"] +model_id = "pipelex/gpt-4.1-nano" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.1, output = 0.4 } +sdk = "openai_responses" +structure_method = "instructor/openai_responses_tools" + +[gpt-5-nano] +model_id = "pipelex/gpt-5-nano" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.05, output = 0.40 } +sdk = "openai_responses" +structure_method = "instructor/openai_responses_tools" + +[gpt-5-mini] +model_id = "pipelex/gpt-5-mini" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.25, output = 2.00 } +sdk = "openai_responses" +structure_method = "instructor/openai_responses_tools" + +[gpt-5-chat] +model_id = "pipelex/gpt-5-chat" +inputs = ["text", "images"] +outputs = ["text"] +costs = { input = 1.25, output = 10.00 } +sdk = "openai_responses" +structure_method = "instructor/openai_responses_tools" + +[gpt-5] +model_id = "pipelex/gpt-5" +inputs = ["text", "images"] +outputs = ["text"] +costs = { input = 1.25, output = 10.00 } +sdk = "openai_responses" +structure_method = "instructor/openai_responses_tools" + +["gpt-5.1"] +model_id = "pipelex/gpt-5.1" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 1.25, output = 10.00 } +sdk = "openai_responses" +structure_method = "instructor/openai_responses_tools" + +["gpt-5.1-chat"] +model_id = "pipelex/gpt-5.1-chat" +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 1.25, output = 10.00 } +sdk = "openai_responses" +structure_method = "instructor/openai_responses_tools" + +# --- Claude LLMs 
--------------------------------------------------------------
+["claude-4-sonnet"]
+model_id = "pipelex/claude-4-sonnet"
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+costs = { input = 3, output = 15 }
+
+["claude-4.1-opus"]
+model_id = "pipelex/claude-4.1-opus"
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+costs = { input = 15, output = 75 }
+
+["claude-4.5-sonnet"]
+model_id = "pipelex/claude-4.5-sonnet"
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+costs = { input = 3, output = 15 }
+
+["claude-4.5-haiku"]
+model_id = "pipelex/claude-4.5-haiku"
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+costs = { input = 1, output = 5 }
+
+["claude-4.5-opus"]
+model_id = "pipelex/claude-4.5-opus"
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+costs = { input = 5, output = 25 }
+
+# --- Gemini LLMs --------------------------------------------------------------
+["gemini-2.0-flash"]
+model_id = "pipelex/gemini-2.0-flash"
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+costs = { input = 0.10, output = 0.40 }
+
+["gemini-2.5-pro"]
+model_id = "pipelex/gemini-2.5-pro"
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+max_prompt_images = 3000
+costs = { input = 1.25, output = 10.0 }
+
+["gemini-2.5-flash"]
+model_id = "pipelex/gemini-2.5-flash"
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+costs = { input = 0.30, output = 2.50 }
+
+["gemini-2.5-flash-lite"]
+model_id = "pipelex/gemini-2.5-flash-lite"
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+costs = { input = 0.10, output = 0.40 }
+
+["gemini-3.0-pro"]
+model_id = "pipelex/gemini-3.0-pro"
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+max_prompt_images = 3000
+costs = { input = 2, output = 12.0 }
+
+# --- xAI LLMs --------------------------------------------------------------
+
+[grok-3]
+model_id = "grok-3"
+inputs = ["text"]
+outputs = ["text"]
+costs = { input = 3, output = 15 }
+
+[grok-3-mini]
+model_id = "grok-3-mini"
+inputs = ["text"]
+outputs = ["text"]
+costs = { input = 0.3, output = 0.5 }
+
+################################################################################
+# OCR and IMAGE GENERATION MODELS
+################################################################################
+
+# We are still working on giving you access to OCR and image generation models,
+# and to the best models from Mistral, through the Pipelex Inference backend.
diff --git a/.pipelex/inference/backends/portkey.toml b/.pipelex/inference/backends/portkey.toml
new file mode 100644
index 0000000..75e2574
--- /dev/null
+++ b/.pipelex/inference/backends/portkey.toml
@@ -0,0 +1,263 @@
+################################################################################
+# Portkey Backend Configuration
+################################################################################
+#
+# This file defines the model specifications for the Portkey backend.
+# It contains model definitions for OpenAI, Anthropic, and Google models
+# routed through the Portkey gateway.
+# +# Configuration structure: +# - Each model is defined in its own section with the model name as the header +# - Headers with dots must be quoted (e.g., ["gpt-4.1"]) +# - Model costs are in USD per million tokens (input/output) +# +# Documentation: https://docs.pipelex.com +# Support: https://go.pipelex.com/discord +# +################################################################################ + +################################################################################ +# MODEL DEFAULTS +################################################################################ + +[defaults] +model_type = "llm" +sdk = "portkey_completions" +structure_method = "instructor/openai_tools" +prompting_target = "anthropic" + +################################################################################ +# LANGUAGE MODELS +################################################################################ + +# --- OpenAI LLMs -------------------------------------------------------------- +[gpt-4o-mini] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.15, output = 0.6 } +sdk = "portkey_responses" +structure_method = "instructor/openai_responses_tools" +x-portkey-provider = "@openai" + +[gpt-4o] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 2.5, output = 10.0 } +sdk = "portkey_responses" +structure_method = "instructor/openai_responses_tools" +x-portkey-provider = "@openai" + +["gpt-4.1-nano"] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.1, output = 0.4 } +sdk = "portkey_responses" +structure_method = "instructor/openai_responses_tools" +x-portkey-provider = "@openai" + +["gpt-4.1-mini"] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.4, output = 1.6 } +sdk = "portkey_responses" +structure_method = "instructor/openai_responses_tools" +x-portkey-provider = "@openai" + +["gpt-4.1"] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 2, output = 8 } +sdk = "portkey_responses" +structure_method = "instructor/openai_responses_tools" +x-portkey-provider = "@openai" + +[o1] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 15.0, output = 60.0 } +valued_constraints = { fixed_temperature = 1 } +sdk = "portkey_responses" +structure_method = "instructor/openai_responses_tools" +x-portkey-provider = "@openai" + +[o3-mini] +inputs = ["text"] +outputs = ["text", "structured"] +costs = { input = 1.1, output = 4.4 } +valued_constraints = { fixed_temperature = 1 } +sdk = "portkey_responses" +structure_method = "instructor/openai_responses_tools" +x-portkey-provider = "@openai" + +[o3] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 2, output = 8 } +valued_constraints = { fixed_temperature = 1 } +sdk = "portkey_responses" +structure_method = "instructor/openai_responses_tools" +x-portkey-provider = "@openai" + +[o4-mini] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 1.1, output = 4.4 } +valued_constraints = { fixed_temperature = 1 } +sdk = "portkey_responses" +structure_method = "instructor/openai_responses_tools" +x-portkey-provider = "@openai" + +[gpt-5-nano] +inputs = ["text", "images"] +outputs = ["text", "structured"] +costs = { input = 0.05, output = 0.4 } +valued_constraints = { fixed_temperature = 1 } +sdk = "portkey_responses" +structure_method = "instructor/openai_responses_tools" +x-portkey-provider = "@openai" + +[gpt-5-mini] 
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+costs = { input = 0.25, output = 2.0 }
+valued_constraints = { fixed_temperature = 1 }
+sdk = "portkey_responses"
+structure_method = "instructor/openai_responses_tools"
+x-portkey-provider = "@openai"
+
+[gpt-5]
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+costs = { input = 1.25, output = 10.0 }
+valued_constraints = { fixed_temperature = 1 }
+sdk = "portkey_responses"
+structure_method = "instructor/openai_responses_tools"
+x-portkey-provider = "@openai"
+
+["gpt-5.1"]
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+costs = { input = 1.25, output = 10.0 }
+valued_constraints = { fixed_temperature = 1 }
+sdk = "portkey_responses"
+structure_method = "instructor/openai_responses_tools"
+x-portkey-provider = "@openai"
+
+["gpt-5.1-codex"]
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+costs = { input = 1.25, output = 10.0 }
+valued_constraints = { fixed_temperature = 1 }
+sdk = "portkey_responses"
+structure_method = "instructor/openai_responses_tools"
+x-portkey-provider = "@openai"
+
+# --- Claude LLMs --------------------------------------------------------------
+[claude-3-haiku]
+model_id = "claude-3-haiku-20240307"
+max_tokens = 4096
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+max_prompt_images = 100
+costs = { input = 0.25, output = 1.25 }
+x-portkey-provider = "@anthropic"
+
+["claude-3.7-sonnet"]
+model_id = "claude-3-7-sonnet-20250219"
+max_tokens = 8192
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+max_prompt_images = 100
+costs = { input = 3.0, output = 15.0 }
+x-portkey-provider = "@anthropic"
+
+[claude-4-sonnet]
+model_id = "claude-sonnet-4-20250514"
+max_tokens = 64000
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+max_prompt_images = 100
+costs = { input = 3.0, output = 15.0 }
+x-portkey-provider = "@anthropic"
+
+[claude-4-opus]
+model_id = "claude-opus-4-20250514"
+max_tokens = 32000
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+max_prompt_images = 100
+costs = { input = 15.0, output = 75.0 }
+x-portkey-provider = "@anthropic"
+
+["claude-4.1-opus"]
+model_id = "claude-opus-4-1-20250805"
+max_tokens = 32000
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+max_prompt_images = 100
+costs = { input = 15.0, output = 75.0 }
+x-portkey-provider = "@anthropic"
+
+["claude-4.5-sonnet"]
+model_id = "claude-sonnet-4-5-20250929"
+max_tokens = 64000
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+max_prompt_images = 100
+costs = { input = 3.0, output = 15.0 }
+x-portkey-provider = "@anthropic"
+
+["claude-4.5-haiku"]
+model_id = "claude-haiku-4-5-20251001"
+max_tokens = 64000
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+max_prompt_images = 100
+costs = { input = 1.0, output = 5.0 }
+x-portkey-provider = "@anthropic"
+
+["claude-4.5-opus"]
+model_id = "claude-opus-4-5-20251101"
+max_tokens = 64000
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+max_prompt_images = 100
+costs = { input = 5.0, output = 25.0 }
+x-portkey-provider = "@anthropic"
+
+# --- Gemini LLMs --------------------------------------------------------------
+["gemini-2.0-flash"]
+model_id = "gemini-2.0-flash"
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+costs = { input = 0.10, output = 0.40 }
+x-portkey-provider = "@google"
+
+["gemini-2.5-pro"]
+model_id = "gemini-2.5-pro"
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+max_prompt_images = 3000
+costs = { input = 1.25, output = 10.0 }
+x-portkey-provider = "@google"
+
+["gemini-2.5-flash"]
+model_id = "gemini-2.5-flash"
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+costs = { input = 0.30, output = 2.50 }
+x-portkey-provider = "@google"
+
+["gemini-2.5-flash-lite"]
+model_id = "gemini-2.5-flash-lite"
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+costs = { input = 0.10, output = 0.40 }
+x-portkey-provider = "@google"
+
+["gemini-3.0-pro"]
+model_id = "gemini-3-pro-preview"
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+max_prompt_images = 3000
+costs = { input = 2, output = 12.0 }
+x-portkey-provider = "@google"
diff --git a/.pipelex/inference/backends/scaleway.toml b/.pipelex/inference/backends/scaleway.toml
new file mode 100644
index 0000000..20fe792
--- /dev/null
+++ b/.pipelex/inference/backends/scaleway.toml
@@ -0,0 +1,67 @@
+################################################################################
+# Scaleway Backend Configuration
+################################################################################
+#
+# This file defines the model specifications for Scaleway models.
+# It contains model definitions for various LLM models accessible through
+# the Scaleway Generative APIs.
+#
+# Configuration structure:
+# - Each model is defined in its own section with the model name as the header
+# - Headers with dots or slashes must be quoted (e.g., ["llama-3.1-8b-instruct"])
+# - Model costs are in USD per million tokens (input/output)
+# - All models defined below are text-only (no vision inputs)
+#
+# Documentation: https://docs.pipelex.com
+# Support: https://go.pipelex.com/discord
+#
+################################################################################
+
+################################################################################
+# MODEL DEFAULTS
+################################################################################
+
+[defaults]
+model_type = "llm"
+sdk = "openai"
+structure_method = "instructor/json"
+
+# --- DeepSeek Models ----------------------------------------------------------
+[deepseek-r1-distill-llama-70b]
+max_tokens = 32768
+inputs = ["text"]
+outputs = ["text", "structured"]
+costs = { input = 0.90, output = 0.90 }
+
+# --- Meta Llama 3.x Series ----------------------------------------------------
+["llama-3.1-8b-instruct"]
+max_tokens = 131072
+inputs = ["text"]
+outputs = ["text", "structured"]
+costs = { input = 0.2, output = 0.2 }
+
+["llama-3.3-70b-instruct"]
+max_tokens = 32768
+inputs = ["text"]
+outputs = ["text", "structured"]
+costs = { input = 0.90, output = 0.90 }
+
+# --- OpenAI GPT-OSS Models ----------------------------------------------------
+[gpt-oss-120b]
+max_tokens = 65536
+inputs = ["text"]
+outputs = ["text", "structured"]
+costs = { input = 0.15, output = 0.60 }
+
+# --- Qwen 3 -------------------------------------------------------------------
+[qwen3-235b-a22b-instruct-2507]
+max_tokens = 40960
+inputs = ["text"]
+outputs = ["text", "structured"]
+costs = { input = 0.75, output = 2.25 }
+
+[qwen3-coder-30b-a3b-instruct]
+max_tokens = 40960
+inputs = ["text"]
+outputs = ["text", "structured"]
+costs = { input = 0.20, output = 0.80 }
diff --git a/.pipelex/inference/backends/vertexai.toml b/.pipelex/inference/backends/vertexai.toml
new file mode 100644
index 0000000..1ebab79
--- /dev/null
+++ b/.pipelex/inference/backends/vertexai.toml
@@ -0,0 +1,54 @@
+################################################################################
+# VertexAI Backend Configuration
+################################################################################
+#
+# This file defines the model specifications for Google VertexAI models.
+# It contains model definitions for Gemini language models
+# accessible through the Google VertexAI API.
+#
+# Configuration structure:
+# - Each model is defined in its own section with the model name as the header
+# - Headers with dots must be quoted (e.g., ["gemini-2.0-flash"])
+# - Model costs are in USD per million tokens (input/output)
+#
+# Documentation: https://docs.pipelex.com
+# Support: https://go.pipelex.com/discord
+#
+################################################################################
+
+################################################################################
+# MODEL DEFAULTS
+################################################################################
+
+[defaults]
+model_type = "llm"
+sdk = "openai"
+prompting_target = "gemini"
+structure_method = "instructor/vertexai_tools"
+
+################################################################################
+# LANGUAGE MODELS
+################################################################################
+
+# --- Gemini 2.0 Series --------------------------------------------------------
+["gemini-2.0-flash"]
+model_id = "google/gemini-2.0-flash"
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+max_prompt_images = 3000
+costs = { input = 0.1, output = 0.4 }
+
+# --- Gemini 2.5 Series --------------------------------------------------------
+["gemini-2.5-pro"]
+model_id = "google/gemini-2.5-pro"
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+max_prompt_images = 3000
+costs = { input = 1.25, output = 10.0 }
+
+["gemini-2.5-flash"]
+model_id = "google/gemini-2.5-flash"
+inputs = ["text", "images"]
+outputs = ["text", "structured"]
+max_prompt_images = 3000
+costs = { input = 0.30, output = 2.50 }
diff --git a/.pipelex/inference/backends/xai.toml b/.pipelex/inference/backends/xai.toml
new file mode 100644
index 0000000..3045344
--- /dev/null
+++ b/.pipelex/inference/backends/xai.toml
@@ -0,0 +1,56 @@
+################################################################################
+# xAI Backend Configuration
+################################################################################
+#
+# This file defines the model specifications for xAI models.
+# It contains model definitions for Grok language models
+# accessible through the xAI API.
+#
+# Configuration structure:
+# - Each model is defined in its own section with the model name as the header
+# - Headers with dots must be quoted
+# - Model costs are in USD per million tokens (input/output)
+#
+# Documentation: https://docs.pipelex.com
+# Support: https://go.pipelex.com/discord
+#
+################################################################################
+
+################################################################################
+# MODEL DEFAULTS
+################################################################################
+
+[defaults]
+model_type = "llm"
+sdk = "openai"
+prompting_target = "anthropic"
+structure_method = "instructor/openai_tools"
+
+################################################################################
+# LANGUAGE MODELS
+################################################################################
+
+# --- Grok 3 Series ------------------------------------------------------------
+[grok-3]
+model_id = "grok-3"
+inputs = ["text"]
+outputs = ["text"]
+costs = { input = 3, output = 15 }
+
+[grok-3-mini]
+model_id = "grok-3-mini"
+inputs = ["text"]
+outputs = ["text"]
+costs = { input = 0.3, output = 0.5 }
+
+[grok-3-fast]
+model_id = "grok-3-fast-latest"
+inputs = ["text"]
+outputs = ["text"]
+costs = { input = 5, output = 25 }
+
+[grok-3-mini-fast]
+model_id = "grok-3-mini-fast-latest"
+inputs = ["text"]
+outputs = ["text"]
+costs = { input = 0.6, output = 4 }
diff --git a/.pipelex/inference/deck/base_deck.toml b/.pipelex/inference/deck/base_deck.toml
new file mode 100644
index 0000000..63ab39e
--- /dev/null
+++ b/.pipelex/inference/deck/base_deck.toml
@@ -0,0 +1,202 @@
+####################################################################################################
+# Pipelex Model Deck - Base Configuration
+####################################################################################################
+#
+# This file defines model aliases and presets for:
+# - LLMs (language models for text generation and structured output)
+# - Image generation models (for creating images from text prompts)
+# - Document extraction models (OCR and text extraction from PDFs/images)
+#
+# Documentation: https://docs.pipelex.com
+# Support: https://go.pipelex.com/discord
+#
+####################################################################################################
+
+####################################################################################################
+# Aliases
+####################################################################################################
+
+[aliases]
+base-claude = "claude-4.5-sonnet"
+base-gpt = "gpt-4o"
+base-gemini = "gemini-2.5-flash"
+base-mistral = "mistral-medium"
+base-groq = "llama-3.3-70b-versatile"
+base-grok = "grok-4-fast-non-reasoning"
+
+best-gpt = "gpt-5.1"
+best-claude = "claude-4.5-opus"
+best-gemini = "gemini-3.0-pro"
+best-mistral = "mistral-medium"
+
+# Groq-specific aliases
+fast-groq = "llama-3.1-8b-instant"
+vision-groq = "llama-4-scout-17b-16e-instruct"
+
+# Image generation aliases
+base-img-gen = "flux-pro/v1.1"
+best-img-gen = "flux-2"
+fast-img-gen = "fast-lightning-sdxl"
+
+####################################################################################################
+# Waterfalls
+####################################################################################################
+
+[waterfalls]
+
+# --- Waterfalls for LLMs ---------------------------------------------------------------------
+smart_llm = [
"claude-4.5-opus", + "claude-4.5-sonnet", + "gemini-3.0-pro", + "gpt-5.1", + "claude-4.1-opus", + "gemini-2.5-pro", + "claude-4-sonnet", + "grok-4-fast-non-reasoning", +] +smart_llm_with_vision = [ + "claude-4.5-opus", + "claude-4.5-sonnet", + "gemini-3.0-pro", + "gpt-5.1", + "claude-4.1-opus", + "gemini-2.5-pro", + "claude-4-sonnet", + "grok-4-fast-non-reasoning", +] +smart_llm_for_structured = [ + "claude-4.5-opus", + "claude-4.5-sonnet", + "gemini-3.0-pro", + "gpt-5.1", + "claude-4.1-opus", + "claude-4-sonnet", +] +llm_for_creativity = [ + "claude-4.5-opus", + "claude-4.1-opus", + "gemini-2.5-pro", + "gpt-5.1", +] +llm_for_large_codebase = [ + "gemini-2.5-pro", + "claude-4.5-sonnet", + "gemini-3.0-pro", + "gpt-5.1", + "gemini-2.5-flash", + "grok-4-fast-non-reasoning", +] +cheap_llm = [ + "gpt-4o-mini", + "gemini-2.5-flash-lite", + "mistral-small", + "claude-3-haiku", + "grok-3-mini", +] +cheap_llm_for_vision = [ + "gemini-2.5-flash-lite", + "gpt-4o-mini", + "claude-3-haiku", +] +cheap_llm_for_structured = ["gpt-4o-mini", "mistral-small", "claude-3-haiku"] +cheap_llm_for_creativity = [ + "gemini-2.5-flash", + "grok-3-mini", + "gpt-4o-mini", + "claude-4.5-haiku", +] + +# --- Waterfalls for Extracts --------------------------------------------------------------------- +pdf_text_extractor = [ + "azure-document-intelligence", + "mistral-ocr", + "pypdfium2-extract-pdf", +] +image_text_extractor = ["mistral-ocr"] + +#################################################################################################### +# LLM Presets +#################################################################################################### + +[llm.presets] + +# LLM Presets — Specific skills ------------------------------------------------------------- + +# Generation skills +llm_for_factual_writing = { model = "base-gpt", temperature = 0.1 } +llm_for_creative_writing = { model = "base-gpt", temperature = 0.9 } +llm_for_writing_cheap = { model = "gpt-4o-mini", temperature = 0.3 } + +# Retrieve and answer questions skills +llm_to_answer_questions_cheap = { model = "gpt-4o-mini", temperature = 0.3 } +llm_to_answer_questions = { model = "base-claude", temperature = 0.3 } +llm_to_retrieve = { model = "base-claude", temperature = 0.1 } + +# Engineering skills +llm_to_engineer = { model = "smart_llm_for_structured", temperature = 0.2 } +llm_to_code = { model = "base-claude", temperature = 0.1 } +llm_to_analyze_large_codebase = { model = "base-claude", temperature = 0.1 } + +# Vision skills +llm_for_img_to_text_cheap = { model = "gpt-4o-mini", temperature = 0.1 } +llm_for_img_to_text = { model = "base-claude", temperature = 0.1 } +llm_for_diagram_to_text = { model = "best-claude", temperature = 0.3 } +llm_for_table_to_text = { model = "base-claude", temperature = 0.3 } + +# Image generation prompting skills +llm_to_prompt_img_gen = { model = "base-claude", temperature = 0.2 } +llm_to_prompt_img_gen_cheap = { model = "gpt-4o-mini", temperature = 0.5 } + +# Groq-specific presets (fast inference, low cost) +llm_groq_fast_text = { model = "fast-groq", temperature = 0.7 } +llm_groq_balanced = { model = "base-groq", temperature = 0.5 } +llm_groq_vision = { model = "vision-groq", temperature = 0.3 } + +# LLM Presets — For Testing --------------------------------------------------------------------- + +llm_for_testing_gen_text = { model = "cheap_llm", temperature = 0.5 } +llm_for_testing_gen_object = { model = "cheap_llm_for_structured", temperature = 0.1 } +llm_for_testing_vision = { model = "cheap_llm_for_vision", 
temperature = 0.5 } +llm_for_testing_vision_structured = { model = "cheap_llm_for_vision", temperature = 0.5 } + +#################################################################################################### +# LLM Choices +#################################################################################################### + +[llm.choice_defaults] +for_text = "cheap_llm" +for_object = "cheap_llm_for_structured" + +#################################################################################################### +# Extract Presets +#################################################################################################### + +[extract] +choice_default = "extract_ocr_from_document" + +[extract.presets] +extract_ocr_from_document = { model = "azure-document-intelligence", max_nb_images = 100, image_min_size = 50 } +extract_basic_from_pdf = { model = "pypdfium2-extract-pdf", max_nb_images = 100, image_min_size = 50 } + +#################################################################################################### +# Image Generation Presets +#################################################################################################### + +[img_gen] +choice_default = "gen_image_basic" + +[img_gen.presets] + +# General purpose +gen_image_basic = { model = "base-img-gen", quality = "medium", guidance_scale = 7.5, is_moderated = true, safety_tolerance = 3 } +gen_image_fast = { model = "fast-img-gen", nb_steps = 4, guidance_scale = 5.0, is_moderated = true, safety_tolerance = 3 } +gen_image_high_quality = { model = "best-img-gen", quality = "high", guidance_scale = 8.0, is_moderated = true, safety_tolerance = 3 } +gen_image_openai_low_quality = { model = "gpt-image-1", quality = "low" } + +# Specific skills +img_gen_for_art = { model = "best-img-gen", quality = "high", guidance_scale = 9.0, is_moderated = false, safety_tolerance = 5 } +img_gen_for_diagram = { model = "base-img-gen", quality = "medium", guidance_scale = 7.0, is_moderated = true, safety_tolerance = 2 } +img_gen_for_mockup = { model = "base-img-gen", quality = "medium", guidance_scale = 6.5, is_moderated = true, safety_tolerance = 3 } +img_gen_for_product = { model = "best-img-gen", quality = "high", guidance_scale = 8.5, is_moderated = true, safety_tolerance = 2 } +img_gen_for_testing = { model = "fast-img-gen", nb_steps = 4, guidance_scale = 4.0, is_moderated = true, safety_tolerance = 4 } diff --git a/.pipelex/inference/deck/overrides.toml b/.pipelex/inference/deck/overrides.toml new file mode 100644 index 0000000..08814db --- /dev/null +++ b/.pipelex/inference/deck/overrides.toml @@ -0,0 +1,19 @@ +#################################################################################################### +# Pipelex Model Deck - Overrides +#################################################################################################### +# +# This file allows you to override the default model choices defined in base_deck.toml. +# You can customize presets for LLMs, image generation, and document extraction models. 
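+#
+# For example (an illustrative override, not part of the shipped defaults),
+# redefining a preset here takes precedence over the same preset in base_deck.toml:
+#
+#   [llm.presets]
+#   llm_for_factual_writing = { model = "base-claude", temperature = 0.1 }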
+#
+# Documentation: https://docs.pipelex.com
+# Support: https://go.pipelex.com/discord
+#
+####################################################################################################
+
+####################################################################################################
+# LLM Deck overrides
+####################################################################################################
+
+[llm.choice_overrides]
+for_text = "disabled"
+for_object = "disabled"
diff --git a/.pipelex/inference/routing_profiles.toml b/.pipelex/inference/routing_profiles.toml
new file mode 100644
index 0000000..bf40281
--- /dev/null
+++ b/.pipelex/inference/routing_profiles.toml
@@ -0,0 +1,173 @@
+# Routing profile library - Routes models to their backends
+# =========================================================================================
+# This file controls which backend serves which model.
+# Change the 'active' field to switch profiles,
+# or add your own custom profiles at the bottom of this file.
+#
+# Documentation: https://docs.pipelex.com
+# Support: https://go.pipelex.com/discord
+# =========================================================================================
+
+# Which profile to use (change this to switch routing)
+active = "pipelex_gateway_first"
+
+# We recommend using the "pipelex_gateway_first" profile to get a head start with all models.
+# To use the Pipelex Gateway backend:
+# 1. Join our Discord community to get your free API key (no credit card required):
+#    Visit https://go.pipelex.com/discord and request your key in the appropriate channel
+# 2. Set the environment variable (or add it to your .env file):
+#    - Linux/macOS: export PIPELEX_GATEWAY_API_KEY="your-api-key"
+#    - Windows CMD: set PIPELEX_GATEWAY_API_KEY=your-api-key
+#    - Windows PowerShell: $env:PIPELEX_GATEWAY_API_KEY="your-api-key"
+# 3. The .pipelex/inference/backends.toml file is already configured with
+#    api_key = "${PIPELEX_GATEWAY_API_KEY}", which reads the key from the environment variable.
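+#
+# For example (the values below are placeholders, not a real key), a minimal
+# .env file contains a single line:
+#
+#   PIPELEX_GATEWAY_API_KEY=your-api-key
+#
+# and the gateway entry in backends.toml picks it up through environment
+# variable expansion:
+#
+#   api_key = "${PIPELEX_GATEWAY_API_KEY}"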
+ +# ========================================================================================= +# Routing Profiles +# ========================================================================================= + +[profiles.pipelex_gateway_first] +description = "Use Pipelex Gateway backend for all its supported models" +default = "pipelex_gateway" +fallback_order = [ + "pipelex_gateway", + "azure_openai", + "bedrock", + "google", + "blackboxai", + "mistral", + "fal", +] + +[profiles.pipelex_gateway_first.routes] +# Pattern matching: "model-pattern" = "backend-name" + +[profiles.pipelex_gateway_first.optional_routes] # Each optional route is considered only if its backend is available +"gpt-*" = "pipelex_gateway" +"gpt-image-1" = "openai" +"claude-*" = "pipelex_gateway" +"grok-*" = "pipelex_gateway" +"gemini-*" = "pipelex_gateway" +"*-sdxl" = "fal" +"flux-*" = "fal" +"mistral-ocr" = "mistral" + +[profiles.all_pipelex_gateway] +description = "Use Pipelex Gateway for all its supported models" +default = "pipelex_gateway" + +[profiles.all_anthropic] +description = "Use Anthropic backend for all its supported models" +default = "anthropic" + +[profiles.all_azure_openai] +description = "Use Azure OpenAI backend for all its supported models" +default = "azure_openai" + +[profiles.all_bedrock] +description = "Use Bedrock backend for all its supported models" +default = "bedrock" + +[profiles.all_blackboxai] +description = "Use BlackBoxAI backend for all its supported models" +default = "blackboxai" + +[profiles.all_fal] +description = "Use FAL backend for all its supported models" +default = "fal" + +[profiles.all_google] +description = "Use Google GenAI backend for all its supported models" +default = "google" + +[profiles.all_groq] +description = "Use groq backend for all its supported models" +default = "groq" + +[profiles.all_huggingface] +description = "Use HuggingFace backend for all its supported models" +default = "huggingface" + +[profiles.all_mistral] +description = "Use Mistral backend for all its supported models" +default = "mistral" + +[profiles.all_ollama] +description = "Use Ollama backend for all its supported models" +default = "ollama" + +[profiles.all_openai] +description = "Use OpenAI backend for all its supported models" +default = "openai" + +[profiles.all_portkey] +description = "Use Portkey backend for all its supported models" +default = "portkey" + +[profiles.all_scaleway] +description = "Use Scaleway backend for all its supported models" +default = "scaleway" + +[profiles.all_xai] +description = "Use xAI backend for all its supported models" +default = "xai" + +[profiles.all_internal] +description = "Use internal backend for all its supported models" +default = "internal" + +# ========================================================================================= +# Custom Profiles +# ========================================================================================= +# Add your own profiles below following the same pattern: +# +# [profiles.your_profile_name] +# description = "What this profile does" +# default = "backend-name" # Where to route models by default +# [profiles.your_profile_name.routes] +# "model-pattern" = "backend-name" # Specific routing rules +# +# Pattern matching supports: +# - Exact names: "gpt-4o-mini" +# - Wildcards: "claude-*" (matches all models starting with claude-) +# - Partial wildcards: "*-sonnet" (matches all sonnet variants) + +# ========================================================================================= +# Example of a 
custom routing profile with mostly pattern matching and one specific model
+# =========================================================================================
+[profiles.example_routing_using_patterns]
+description = "Example routing profile using patterns"
+default = "pipelex_gateway"
+
+[profiles.example_routing_using_patterns.routes]
+# Pattern matching: "model-pattern" = "backend-name"
+"gpt-*" = "azure_openai"
+"claude-*" = "bedrock"
+"gemini-*" = "google"
+"grok-*" = "xai"
+"*-sdxl" = "fal"
+"flux-*" = "fal"
+"gpt-image-1" = "openai"
+
+# =========================================================================================
+# Example of a custom routing profile with specific model matching
+# =========================================================================================
+
+[profiles.example_routing_using_specific_models]
+description = "Example routing profile using specific models"
+
+[profiles.example_routing_using_specific_models.routes]
+"gpt-5-nano" = "pipelex_gateway"
+"gpt-4o-mini" = "blackboxai"
+"gpt-5-mini" = "openai"
+"gpt-5-chat" = "azure_openai"
+
+"claude-4-sonnet" = "pipelex_gateway"
+"claude-3.7-sonnet" = "blackboxai"
+
+"gemini-2.5-flash-lite" = "pipelex_gateway"
+"gemini-2.5-flash" = "blackboxai"
+"gemini-2.5-pro" = "vertexai"
+
+"grok-3" = "pipelex_gateway"
+"grok-3-mini" = "xai"
diff --git a/.pipelex/pipelex.toml b/.pipelex/pipelex.toml
new file mode 100644
index 0000000..4a2ea38
--- /dev/null
+++ b/.pipelex/pipelex.toml
@@ -0,0 +1,162 @@
+####################################################################################################
+# Pipelex Configuration File
+####################################################################################################
+#
+# This configuration file is copied to client projects' .pipelex/ directory when running:
+# `pipelex init config`
+#
+# Purpose:
+# - This file allows you to override Pipelex's default settings for specific projects
+# - Feel free to modify any settings below to suit your needs
+# - You can add any configuration sections that exist in the main pipelex.toml
+#
+# Finding Available Settings:
+# - See the full default configuration in: pipelex/pipelex.toml (in the Pipelex package)
+# - See the configuration structure classes in: pipelex/config.py and pipelex/cogt/config_cogt.py
+#
+# Common customizations are proposed below, such as:
+# - Logging levels and behavior
+# - Excluded directories for scanning
+# - LLM prompt dumping for debugging
+# - Feature flags for tracking and reporting
+# - Observer and reporting output directories
+#
+# Documentation: https://docs.pipelex.com
+# Support: https://go.pipelex.com/discord
+#
+####################################################################################################
+
+[pipelex.pipeline_execution_config]
+# Uncomment to disable conversion of data URLs to pipelex-storage:// URIs
+# is_normalize_data_urls_to_storage = false
+
+[pipelex.pipeline_execution_config.graph_config.data_inclusion]
+# Uncomment to include stuff data in graph outputs:
+stuff_json_content = true
+stuff_text_content = true
+stuff_html_content = true
+error_stack_traces = true
+
+[pipelex.pipeline_execution_config.graph_config.graphs_inclusion]
+# Uncomment to customize which graph outputs are generated (all enabled by default):
+# graphspec_json = false
+# mermaidflow_mmd = false
+# mermaidflow_html = false
+# reactflow_viewspec = false
+# reactflow_html = false
+
+[pipelex.pipeline_execution_config.graph_config.reactflow_config]
+# Uncomment to
customize ReactFlow graph rendering: +# edge_type = "bezier" # Options: "bezier", "smoothstep", "step", "straight" +# nodesep = 50 # Horizontal spacing between nodes +# ranksep = 80 # Vertical spacing between ranks/levels +# initial_zoom = 1.0 # Initial zoom level (1.0 = 100%) +# pan_to_top = true # Pan to show top of graph on load + +[pipelex.storage_config] +# Storage method: "local" (default), "in_memory", "s3", or "gcp" +# method = "local" + +# Whether to fetch remote HTTP URLs and store them locally +# is_fetch_remote_content_enabled = true + +# --- Local Storage Configuration --- +# Uncomment to customize local storage settings: +[pipelex.storage_config.local] +# uri_format = "{primary_id}/{secondary_id}/{hash}.{extension}" +# local_storage_path = ".pipelex/storage" + +# --- AWS S3 Storage Configuration --- +# Uncomment to use S3 storage (requires boto3: `pip install pipelex[s3]`): +[pipelex.storage_config.s3] +# uri_format = "{primary_id}/{secondary_id}/{hash}.{extension}" +# bucket_name = "your-bucket-name" +# region = "us-east-1" +# signed_urls_lifespan_seconds = 3600 # Set to "disabled" for public URLs + +# --- Google Cloud Storage Configuration --- +# Uncomment to use GCP storage (requires google-cloud-storage: `pip install pipelex[gcp-storage]`): +[pipelex.storage_config.gcp] +# uri_format = "{primary_id}/{secondary_id}/{hash}.{extension}" +# bucket_name = "your-bucket-name" +# project_id = "your-project-id" +# signed_urls_lifespan_seconds = 3600 # Set to "disabled" for public URLs + +[pipelex.scan_config] +# Uncomment to customize the excluded directories for scanning +# excluded_dirs = [ +# ".venv", +# "venv", +# "env", +# ".env", +# "virtualenv", +# ".virtualenv", +# ".git", +# "__pycache__", +# ".pytest_cache", +# ".mypy_cache", +# ".ruff_cache", +# "node_modules", +# "results", +# ] + +[pipelex.builder_config] +# Uncomment to change where the generated pipelines are saved: +# default_output_dir = "." 
+# default_bundle_file_name = "bundle" +# default_directory_base_name = "pipeline" + +[pipelex.log_config] +# Uncomment to change the default log level: +# default_log_level = "INFO" + +# Uncomment to log to stderr instead of stdout +# console_log_target = "stderr" +# console_print_target = "stderr" + +[pipelex.log_config.package_log_levels] +# Uncomment to change the log level for specific packages: +# pipelex = "INFO" + +[pipelex.observer_config] +# Uncomment to change the directory where the observer will save its results: +# observer_dir = "results/observer" + +[pipelex.feature_config] +# WIP/Experimental feature flags: +# is_pipeline_tracking_enabled = false +# is_reporting_enabled = true + +[pipelex.reporting_config] +# Uncomment to customize the reporting configuration: +# is_log_costs_to_console = false +# is_generate_cost_report_file_enabled = false +# cost_report_dir_path = "reports" +# cost_report_base_name = "cost_report" +# cost_report_extension = "csv" +# cost_report_unit_scale = 1.0 + +[cogt] +[cogt.model_deck_config] +# Uncomment to disable model fallback: it will raise errors instead of using secondary model options: +# is_model_fallback_enabled = false +# Uncomment to change the reaction to missing presets: "raise" (default), "log" or "none" +# missing_presets_reaction = "raise" + +[cogt.tenacity_config] +# Uncomment to change those values as needed: +# max_retries = 50 # Maximum number of retry attempts before giving up +# wait_multiplier = 0.2 # Multiplier applied to the wait time between retries (in seconds) +# wait_max = 20 # Maximum wait time between retries (in seconds) +# wait_exp_base = 1.3 # Base for exponential backoff calculation + +[cogt.llm_config] +# Uncomment any of these to enable dumping the inputs or outputs of text-generation with an LLM: +# is_dump_text_prompts_enabled = true +# is_dump_response_text_enabled = true + +[cogt.llm_config.instructor_config] +# Uncomment any of these to enable dumping the kwargs, response or errors when generating structured content: +# is_dump_kwargs_enabled = true +# is_dump_response_enabled = true +# is_dump_error_enabled = true diff --git a/.pipelex/pipelex_service.toml b/.pipelex/pipelex_service.toml new file mode 100644 index 0000000..afe39a2 --- /dev/null +++ b/.pipelex/pipelex_service.toml @@ -0,0 +1,19 @@ +#################################################################################################### +# Pipelex Service Configuration +#################################################################################################### +# +# This file stores settings related to Pipelex managed services. +# Currently used for Pipelex Gateway terms acceptance. +# +# Documentation: https://docs.pipelex.com +# Support: https://go.pipelex.com/discord +# +#################################################################################################### + +[agreement] +# Set to true after accepting Pipelex terms of service. +terms_accepted = true + +# Note: when using pipelex_gateway, telemetry is enabled to monitor service usage. +# We collect technical data (model, pipe type...) and quantitative data (token counts...) +# but NOT your content, pipe codes, or output class names. 
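One clarification on the [cogt.tenacity_config] values shown in pipelex.toml above: with a multiplier, an exponential base, and a cap, retry waits typically grow as multiplier * base ** attempt, clamped to wait_max. A minimal sketch under that assumption (this mirrors tenacity-style exponential backoff; it is not the Pipelex implementation):

def backoff_wait_seconds(
    attempt: int,
    wait_multiplier: float = 0.2,
    wait_exp_base: float = 1.3,
    wait_max: float = 20.0,
) -> float:
    # Wait before retry number `attempt` (1-based), capped at wait_max.
    return min(wait_multiplier * (wait_exp_base ** attempt), wait_max)

# With the commented defaults above, the waits grow gently: ~0.26s, ~0.34s, ~0.44s, ~0.57s, ...
for attempt in range(1, 5):
    print(f"retry {attempt}: wait {backoff_wait_seconds(attempt):.2f}s")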
diff --git a/.pipelex/telemetry.toml b/.pipelex/telemetry.toml new file mode 100644 index 0000000..eb2c537 --- /dev/null +++ b/.pipelex/telemetry.toml @@ -0,0 +1,92 @@ +#################################################################################################### +# Custom Telemetry Configuration +#################################################################################################### +# +# This file controls YOUR custom telemetry settings for observability and analytics. +# Configure your own PostHog, Langfuse, or OTLP-compatible backends here. +# +# NOTE: When using Pipelex Gateway, identified telemetry is automatically enabled +# (tied to your Gateway API key, hashed for security). This allows us to monitor +# service quality, enforce fair usage, and provide you with better support. +# Gateway telemetry operates independently from your settings below - you can have both! +# +# To disable all telemetry, set the DO_NOT_TRACK=1 environment variable. +# +# Documentation: https://docs.pipelex.com +# Support: https://go.pipelex.com/discord +# +#################################################################################################### + +# ────────────────────────────────────────────────────────────────────────────── +# PostHog Configuration (Event tracking + AI span tracing) +# ────────────────────────────────────────────────────────────────────────────── + +[custom_posthog] +mode = "off" # Values: "off" | "anonymous" | "identified" +# user_id = "your_user_id" # Required when mode = "identified" +endpoint = "${POSTHOG_ENDPOINT}" # Default: https://us.i.posthog.com (or https://eu.i.posthog.com for EU) +api_key = "${POSTHOG_API_KEY}" # Get from PostHog Project Settings +geoip = true # Enable GeoIP lookup +debug = false # Enable PostHog debug mode +redact_properties = [ + "prompt", + "system_prompt", + "response", + "file_path", + "url", +] # Event properties to redact + +# AI span tracing to YOUR PostHog (does NOT affect Langfuse/OTLP - they receive full data) +[custom_posthog.tracing] +enabled = false # Send AI spans to your PostHog + +# Privacy controls for data sent to YOUR PostHog only +[custom_posthog.tracing.capture] +content = false # Capture prompt/completion content +# content_max_length = 1000 # Max length for captured content (omit for unlimited) +pipe_codes = false # Include pipe codes in span names/attributes +output_class_names = false # Include output class names in span names/attributes + +# ────────────────────────────────────────────────────────────────────────────── +# Portkey SDK Configuration +# ────────────────────────────────────────────────────────────────────────────── + +[custom_portkey] +force_debug_enabled = false +force_tracing_enabled = false + +# ────────────────────────────────────────────────────────────────────────────── +# Langfuse Integration +# Note: Langfuse receives FULL span data (no redaction) +# ────────────────────────────────────────────────────────────────────────────── + +[langfuse] +enabled = false +# endpoint = "https://cloud.langfuse.com" # Override for self-hosted Langfuse +# public_key = "${LANGFUSE_PUBLIC_KEY}" # Langfuse public key +# secret_key = "${LANGFUSE_SECRET_KEY}" # Langfuse secret key + +# ────────────────────────────────────────────────────────────────────────────── +# Additional OTLP Exporters (array for multiple) +# Note: OTLP exporters receive FULL span data (no redaction) +# ────────────────────────────────────────────────────────────────────────────── + +# [[otlp]] +# name = "my-collector" # 
Identifier for logging +# endpoint = "https://..." # OTLP endpoint URL +# headers = { Authorization = "Bearer ${OTLP_AUTH_TOKEN}" } # Headers for OTLP export + +# ────────────────────────────────────────────────────────────────────────────── +# Custom Telemetry Allowed Modes +# Controls which integration modes can use custom telemetry settings above. +# ────────────────────────────────────────────────────────────────────────────── + +[telemetry_allowed_modes] +ci = false # CI environments don't use custom telemetry +cli = true # CLI usage allows custom telemetry +docker = true # Docker deployments allow custom telemetry +fastapi = true # FastAPI integrations allow custom telemetry +mcp = true # MCP integrations allow custom telemetry +n8n = true # n8n integrations allow custom telemetry +pytest = false # Tests don't use custom telemetry +python = false # Direct Python SDK usage doesn't use custom telemetry by default diff --git a/Makefile b/Makefile index 803118d..b3f7eb7 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ VIRTUAL_ENV := $(CURDIR)/.venv PROJECT_NAME := $(shell grep '^name = ' pyproject.toml | sed -E 's/name = "(.*)"/\1/') # The "?" is used to make the variable optional, so that it can be overridden by the user. -PYTHON_VERSION ?= 3.11 +PYTHON_VERSION ?= 3.13 VENV_PYTHON := $(VIRTUAL_ENV)/bin/python VENV_PYTEST := $(VIRTUAL_ENV)/bin/pytest VENV_RUFF := $(VIRTUAL_ENV)/bin/ruff @@ -16,7 +16,7 @@ VENV_PIPELEX := $(VIRTUAL_ENV)/bin/pipelex UV_MIN_VERSION = $(shell grep -m1 'required-version' pyproject.toml | sed -E 's/.*= *"([^<>=, ]+).*/\1/') -USUAL_PYTEST_MARKERS := "(dry_runnable or not (inference or llm or imgg or ocr)) and not (needs_output or pipelex_api)" +USUAL_PYTEST_MARKERS := "(dry_runnable or not inference) and not (needs_output or pipelex_api)" define PRINT_TITLE $(eval PROJECT_PART := [$(PROJECT_NAME)]) @@ -117,7 +117,7 @@ env: check-uv $(call PRINT_TITLE,"Creating virtual environment") @if [ ! -d $(VIRTUAL_ENV) ]; then \ echo "Creating Python virtual env in \`${VIRTUAL_ENV}\`"; \ - uv venv $(VIRTUAL_ENV) --python 3.11; \ + uv venv $(VIRTUAL_ENV) --python $(PYTHON_VERSION); \ else \ echo "Python virtual env already exists in \`${VIRTUAL_ENV}\`"; \ fi diff --git a/crazy/__init__.py b/crazy/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/crazy/bundle.plx b/crazy/bundle.plx new file mode 100644 index 0000000..af706cd --- /dev/null +++ b/crazy/bundle.plx @@ -0,0 +1,42 @@ +domain = "crazy_image_generation" +description = "Imagining and rendering absurd, funny images with unexpected surreal elements" +main_pipe = "generate_crazy_image" + +[concept.ImagePrompt] +description = """ +A detailed textual description of a scene to be used as input for an image generation model, including subjects, setting, style, and visual details. 
+""" +refines = "Text" + +[pipe.generate_crazy_image] +type = "PipeSequence" +description = """ +Main pipeline that orchestrates the full crazy image generation flow - imagines a wild, absurd scene concept and renders it as an image +""" +output = "Image" +steps = [ + { pipe = "imagine_scene", result = "image_prompt" }, + { pipe = "render_image", result = "crazy_image" }, +] + +[pipe.imagine_scene] +type = "PipeLLM" +description = """ +Generates a creative, absurd, and hilarious image concept combining unexpected elements in surreal ways - think flying spaghetti monsters, penguins in business suits at a disco, or a T-Rex doing yoga on the moon +""" +output = "ImagePrompt" +model = "cheap_llm_for_creativity" +system_prompt = """ +You are a wildly creative visual concept artist specializing in absurd, surreal, and hilarious imagery. Your task is to generate a structured image prompt that combines unexpected elements in surprising and funny ways. Think outside the box - the more unexpected the combination, the better! +""" +prompt = """ +Generate a creative, absurd, and funny image concept. Combine unexpected elements in a surreal way that would make viewers laugh or do a double-take. Be VERY concise and focus on vivid, specific visual details that work well for image generation. +""" + +[pipe.render_image] +type = "PipeImgGen" +description = "Generates the absurd image based on the creative scene description" +inputs = { image_prompt = "ImagePrompt" } +output = "Image" +prompt = "$image_prompt" +model = "gen_image_high_quality" diff --git a/crazy/bundle_view.html b/crazy/bundle_view.html new file mode 100644 index 0000000..0e294d7 --- /dev/null +++ b/crazy/bundle_view.html @@ -0,0 +1,111 @@ + + + + + + + +
[Rendered bundle view omitted: a Rich-generated HTML snapshot of the crazy_image_generation bundle, listing the ImagePrompt concept and the generate_crazy_image / imagine_scene / render_image pipes with the same descriptions, prompts, and skills as bundle.plx above]
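The bundle above wires its steps by name: imagine_scene's output is stored under the result name image_prompt, and render_image declares image_prompt as its input and references it as $image_prompt. A plain-Python analogue of that chaining, with hypothetical stand-in functions rather than the actual Pipelex engine:

import asyncio
from typing import Any, Awaitable, Callable, Dict, List, Tuple

async def imagine_scene(memory: Dict[str, Any]) -> str:
    # Stand-in for the PipeLLM step: returns an ImagePrompt-like text.
    return "A T-Rex doing yoga on the moon, photorealistic, golden hour"

async def render_image(memory: Dict[str, Any]) -> str:
    # Stand-in for the PipeImgGen step: consumes the image_prompt entry,
    # mirroring inputs = { image_prompt = "ImagePrompt" } and prompt = "$image_prompt".
    return f"<image rendered from: {memory['image_prompt']}>"

# The sequence analogue: run steps in order, storing each output under its result name.
STEPS: List[Tuple[Callable[[Dict[str, Any]], Awaitable[Any]], str]] = [
    (imagine_scene, "image_prompt"),
    (render_image, "crazy_image"),
]

async def generate_crazy_image() -> Any:
    memory: Dict[str, Any] = {}
    for step, result_name in STEPS:
        memory[result_name] = await step(memory)
    return memory["crazy_image"]  # the final step's result is the sequence output

print(asyncio.run(generate_crazy_image()))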
diff --git a/crazy/bundle_view.svg b/crazy/bundle_view.svg
new file mode 100644
index 0000000..a7dbf52
--- /dev/null
+++ b/crazy/bundle_view.svg
@@ -0,0 +1,397 @@
[Rendered bundle view omitted: a Rich-generated SVG snapshot duplicating the same domain, concept, and pipe summaries as bundle_view.html]
diff --git a/crazy/inputs.json b/crazy/inputs.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/crazy/inputs.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/crazy/run_generate_crazy_image.py b/crazy/run_generate_crazy_image.py
new file mode 100644
index 0000000..2d0f24d
--- /dev/null
+++ b/crazy/run_generate_crazy_image.py
@@ -0,0 +1,19 @@
+import asyncio
+
+from pipelex.core.stuffs.image_content import ImageContent
+from pipelex.pipelex import Pipelex
+from pipelex.pipeline.execute import execute_pipeline
+
+
+async def run_generate_crazy_image() -> ImageContent:
+    pipe_output = await execute_pipeline(
+        pipe_code="generate_crazy_image",
+    )
+    return pipe_output.main_stuff_as(content_type=ImageContent)
+
+
+if __name__ == "__main__":
+    # Initialize Pipelex
+    with Pipelex.make():
+        # Run the pipeline
+        result = asyncio.run(run_generate_crazy_image())
diff --git a/crazy/structures/__init__.py b/crazy/structures/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/crazy/structures/crazy_image_generation_ImagePrompt.py b/crazy/structures/crazy_image_generation_ImagePrompt.py
new file mode 100644
index 0000000..46b45ae
--- /dev/null
+++ b/crazy/structures/crazy_image_generation_ImagePrompt.py
@@ -0,0 +1,22 @@
+"""
+AUTOGENERATED CODE - DO NOT EDIT
+
+If you want to customize this structure:
+    1. Copy this file to your own module
+    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+       and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
+    3.
Make sure your custom class is importable and registered + +To regenerate: pipelex build structures +""" + +from enum import Enum +from typing import Any, Dict, List, Literal, Optional + +from pipelex.core.stuffs.structured_content import StructuredContent +from pipelex.core.stuffs.text_content import TextContent +from pydantic import Field + + +class ImagePrompt(TextContent): + """Generated ImagePrompt class""" diff --git a/my_project/hello_world.py b/my_project/hello_world.py index 7249a14..f70cd9e 100644 --- a/my_project/hello_world.py +++ b/my_project/hello_world.py @@ -23,5 +23,5 @@ async def hello_world(): # start Pipelex -with Pipelex.make(): +with Pipelex.make(library_dirs=["my_project"]): asyncio.run(hello_world()) diff --git a/pyproject.toml b/pyproject.toml index 821ce0d..c8bb40a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,15 +12,17 @@ classifiers = [ "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", "Operating System :: OS Independent", ] dependencies = [ - "pipelex[mistralai,anthropic,google,google-genai,bedrock,fal]==0.17.3", + "pipelex[mistralai,anthropic,google,google-genai,bedrock,fal]", + "pipelex==0.17.3", ] [tool.uv.sources] -pipelex = { path = "../pipelex", editable = true } +pipelex = { git = "https://github.com/Pipelex/pipelex.git", branch = "feature/Chicago" } [tool.setuptools] diff --git a/uv.lock b/uv.lock index 66406c0..87b7dfc 100644 --- a/uv.lock +++ b/uv.lock @@ -354,11 +354,11 @@ wheels = [ [[package]] name = "cachetools" -version = "6.2.2" +version = "6.2.4" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/fb/44/ca1675be2a83aeee1886ab745b28cda92093066590233cc501890eb8417a/cachetools-6.2.2.tar.gz", hash = "sha256:8e6d266b25e539df852251cfd6f990b4bc3a141db73b939058d809ebd2590fc6", size = 31571, upload-time = "2025-11-13T17:42:51.465Z" } +sdist = { url = "https://files.pythonhosted.org/packages/bc/1d/ede8680603f6016887c062a2cf4fc8fdba905866a3ab8831aa8aa651320c/cachetools-6.2.4.tar.gz", hash = "sha256:82c5c05585e70b6ba2d3ae09ea60b79548872185d2f24ae1f2709d37299fd607", size = 31731, upload-time = "2025-12-15T18:24:53.744Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e6/46/eb6eca305c77a4489affe1c5d8f4cae82f285d9addd8de4ec084a7184221/cachetools-6.2.2-py3-none-any.whl", hash = "sha256:6c09c98183bf58560c97b2abfcedcbaf6a896a490f534b031b661d3723b45ace", size = 11503, upload-time = "2025-11-13T17:42:50.232Z" }, + { url = "https://files.pythonhosted.org/packages/2c/fc/1d7b80d0eb7b714984ce40efc78859c022cd930e402f599d8ca9e39c78a4/cachetools-6.2.4-py3-none-any.whl", hash = "sha256:69a7a52634fed8b8bf6e24a050fb60bff1c9bd8f6d24572b99c32d4e71e62a51", size = 11551, upload-time = "2025-12-15T18:24:52.332Z" }, ] [[package]] @@ -527,11 +527,11 @@ wheels = [ [[package]] name = "eval-type-backport" -version = "0.3.0" +version = "0.3.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/51/23/079e39571d6dd8d90d7a369ecb55ad766efb6bae4e77389629e14458c280/eval_type_backport-0.3.0.tar.gz", hash = "sha256:1638210401e184ff17f877e9a2fa076b60b5838790f4532a21761cc2be67aea1", size = 9272, upload-time = "2025-11-13T20:56:50.845Z" } +sdist = { url = "https://files.pythonhosted.org/packages/fb/a3/cafafb4558fd638aadfe4121dc6cefb8d743368c085acb2f521df0f3d9d7/eval_type_backport-0.3.1.tar.gz", hash = 
"sha256:57e993f7b5b69d271e37482e62f74e76a0276c82490cf8e4f0dffeb6b332d5ed", size = 9445, upload-time = "2025-12-02T11:51:42.987Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/19/d8/2a1c638d9e0aa7e269269a1a1bf423ddd94267f1a01bbe3ad03432b67dd4/eval_type_backport-0.3.0-py3-none-any.whl", hash = "sha256:975a10a0fe333c8b6260d7fdb637698c9a16c3a9e3b6eb943fee6a6f67a37fe8", size = 6061, upload-time = "2025-11-13T20:56:49.499Z" }, + { url = "https://files.pythonhosted.org/packages/cf/22/fdc2e30d43ff853720042fa15baa3e6122722be1a7950a98233ebb55cd71/eval_type_backport-0.3.1-py3-none-any.whl", hash = "sha256:279ab641905e9f11129f56a8a78f493518515b83402b860f6f06dd7c011fdfa8", size = 6063, upload-time = "2025-12-02T11:51:41.665Z" }, ] [[package]] @@ -560,15 +560,17 @@ wheels = [ [[package]] name = "fal-client" -version = "0.9.1" +version = "0.11.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "httpx" }, { name = "httpx-sse" }, + { name = "msgpack" }, + { name = "websockets" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b8/a1/98ab1cea4c2424ee612292bc92b07905e1a15a05584f6c263cde38e6a3a2/fal_client-0.9.1.tar.gz", hash = "sha256:c8f7f88f79c4b4c4f069be9f571be924dc7c4a6bf07c252fe0b75f3c46c8d66d", size = 17085, upload-time = "2025-11-13T18:15:09.911Z" } +sdist = { url = "https://files.pythonhosted.org/packages/44/58/223a48a4d0538e73c292086f284480be42ada14223d8067432bf5eeb7aaf/fal_client-0.11.0.tar.gz", hash = "sha256:350f8cd73f5035ae1e2678ce46beb7f9f43d0a96d43586b02cd88fd973e656e1", size = 21823, upload-time = "2026-01-05T15:22:33.606Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8b/57/775821a71459f2b83bbaa59452a4b1e4772f7c770de88a6f591c9d43c7c8/fal_client-0.9.1-py3-none-any.whl", hash = "sha256:8eba86c947299852c8306f685eee883ce01856543bf4344b87f65abd4b7d7622", size = 11157, upload-time = "2025-11-13T18:15:08.528Z" }, + { url = "https://files.pythonhosted.org/packages/64/67/7dd4c4b2b375cc3f072ec7bde528d7c8bafb3bcdd7df1e0758d97366a1c8/fal_client-0.11.0-py3-none-any.whl", hash = "sha256:dc4f528299aa9aeefad949e0bed0183fb78c19f0a1b7e7f85d95c859f2f694d7", size = 14771, upload-time = "2026-01-05T15:22:32.116Z" }, ] [[package]] @@ -724,6 +726,11 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/be/a4/7319a2a8add4cc352be9e3efeff5e2aacee917c85ca2fa1647e29089983c/google_auth-2.41.1-py2.py3-none-any.whl", hash = "sha256:754843be95575b9a19c604a848a41be03f7f2afd8c019f716dc1f51ee41c639d", size = 221302, upload-time = "2025-09-30T22:51:24.212Z" }, ] +[package.optional-dependencies] +requests = [ + { name = "requests" }, +] + [[package]] name = "google-auth-oauthlib" version = "1.2.3" @@ -739,21 +746,23 @@ wheels = [ [[package]] name = "google-genai" -version = "1.52.0" +version = "1.55.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, - { name = "google-auth" }, + { name = "distro" }, + { name = "google-auth", extra = ["requests"] }, { name = "httpx" }, { name = "pydantic" }, { name = "requests" }, + { name = "sniffio" }, { name = "tenacity" }, { name = "typing-extensions" }, { name = "websockets" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/09/4e/0ad8585d05312074bb69711b2d81cfed69ce0ae441913d57bf169bed20a7/google_genai-1.52.0.tar.gz", hash = "sha256:a74e8a4b3025f23aa98d6a0f84783119012ca6c336fd68f73c5d2b11465d7fc5", size = 258743, upload-time = "2025-11-21T02:18:55.742Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/1d/7c/19b59750592702305ae211905985ec8ab56f34270af4a159fba5f0214846/google_genai-1.55.0.tar.gz", hash = "sha256:ae9f1318fedb05c7c1b671a4148724751201e8908a87568364a309804064d986", size = 477615, upload-time = "2025-12-11T02:49:28.624Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ec/66/03f663e7bca7abe9ccfebe6cb3fe7da9a118fd723a5abb278d6117e7990e/google_genai-1.52.0-py3-none-any.whl", hash = "sha256:c8352b9f065ae14b9322b949c7debab8562982f03bf71d44130cd2b798c20743", size = 261219, upload-time = "2025-11-21T02:18:54.515Z" }, + { url = "https://files.pythonhosted.org/packages/3e/86/a5a8e32b2d40b30b5fb20e7b8113fafd1e38befa4d1801abd5ce6991065a/google_genai-1.55.0-py3-none-any.whl", hash = "sha256:98c422762b5ff6e16b8d9a1e4938e8e0ad910392a5422e47f5301498d7f373a1", size = 703389, upload-time = "2025-12-11T02:49:27.105Z" }, ] [[package]] @@ -1243,6 +1252,67 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/7c/97/5b428225ca4524b9722c8e1b2812c35f958ec5bb6a58c274c6c07a136da8/mistralai-1.5.2-py3-none-any.whl", hash = "sha256:5b1112acebbcad1afd7732ce0bd60614975b64999801c555c54768ac41f506ae", size = 278149, upload-time = "2025-03-19T18:40:28.232Z" }, ] +[[package]] +name = "msgpack" +version = "1.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4d/f2/bfb55a6236ed8725a96b0aa3acbd0ec17588e6a2c3b62a93eb513ed8783f/msgpack-1.1.2.tar.gz", hash = "sha256:3b60763c1373dd60f398488069bcdc703cd08a711477b5d480eecc9f9626f47e", size = 173581, upload-time = "2025-10-08T09:15:56.596Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f5/a2/3b68a9e769db68668b25c6108444a35f9bd163bb848c0650d516761a59c0/msgpack-1.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0051fffef5a37ca2cd16978ae4f0aef92f164df86823871b5162812bebecd8e2", size = 81318, upload-time = "2025-10-08T09:14:38.722Z" }, + { url = "https://files.pythonhosted.org/packages/5b/e1/2b720cc341325c00be44e1ed59e7cfeae2678329fbf5aa68f5bda57fe728/msgpack-1.1.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a605409040f2da88676e9c9e5853b3449ba8011973616189ea5ee55ddbc5bc87", size = 83786, upload-time = "2025-10-08T09:14:40.082Z" }, + { url = "https://files.pythonhosted.org/packages/71/e5/c2241de64bfceac456b140737812a2ab310b10538a7b34a1d393b748e095/msgpack-1.1.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b696e83c9f1532b4af884045ba7f3aa741a63b2bc22617293a2c6a7c645f251", size = 398240, upload-time = "2025-10-08T09:14:41.151Z" }, + { url = "https://files.pythonhosted.org/packages/b7/09/2a06956383c0fdebaef5aa9246e2356776f12ea6f2a44bd1368abf0e46c4/msgpack-1.1.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:365c0bbe981a27d8932da71af63ef86acc59ed5c01ad929e09a0b88c6294e28a", size = 406070, upload-time = "2025-10-08T09:14:42.821Z" }, + { url = "https://files.pythonhosted.org/packages/0e/74/2957703f0e1ef20637d6aead4fbb314330c26f39aa046b348c7edcf6ca6b/msgpack-1.1.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:41d1a5d875680166d3ac5c38573896453bbbea7092936d2e107214daf43b1d4f", size = 393403, upload-time = "2025-10-08T09:14:44.38Z" }, + { url = "https://files.pythonhosted.org/packages/a5/09/3bfc12aa90f77b37322fc33e7a8a7c29ba7c8edeadfa27664451801b9860/msgpack-1.1.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:354e81bcdebaab427c3df4281187edc765d5d76bfb3a7c125af9da7a27e8458f", size = 398947, upload-time = 
"2025-10-08T09:14:45.56Z" }, + { url = "https://files.pythonhosted.org/packages/4b/4f/05fcebd3b4977cb3d840f7ef6b77c51f8582086de5e642f3fefee35c86fc/msgpack-1.1.2-cp310-cp310-win32.whl", hash = "sha256:e64c8d2f5e5d5fda7b842f55dec6133260ea8f53c4257d64494c534f306bf7a9", size = 64769, upload-time = "2025-10-08T09:14:47.334Z" }, + { url = "https://files.pythonhosted.org/packages/d0/3e/b4547e3a34210956382eed1c85935fff7e0f9b98be3106b3745d7dec9c5e/msgpack-1.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:db6192777d943bdaaafb6ba66d44bf65aa0e9c5616fa1d2da9bb08828c6b39aa", size = 71293, upload-time = "2025-10-08T09:14:48.665Z" }, + { url = "https://files.pythonhosted.org/packages/2c/97/560d11202bcd537abca693fd85d81cebe2107ba17301de42b01ac1677b69/msgpack-1.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2e86a607e558d22985d856948c12a3fa7b42efad264dca8a3ebbcfa2735d786c", size = 82271, upload-time = "2025-10-08T09:14:49.967Z" }, + { url = "https://files.pythonhosted.org/packages/83/04/28a41024ccbd67467380b6fb440ae916c1e4f25e2cd4c63abe6835ac566e/msgpack-1.1.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:283ae72fc89da59aa004ba147e8fc2f766647b1251500182fac0350d8af299c0", size = 84914, upload-time = "2025-10-08T09:14:50.958Z" }, + { url = "https://files.pythonhosted.org/packages/71/46/b817349db6886d79e57a966346cf0902a426375aadc1e8e7a86a75e22f19/msgpack-1.1.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:61c8aa3bd513d87c72ed0b37b53dd5c5a0f58f2ff9f26e1555d3bd7948fb7296", size = 416962, upload-time = "2025-10-08T09:14:51.997Z" }, + { url = "https://files.pythonhosted.org/packages/da/e0/6cc2e852837cd6086fe7d8406af4294e66827a60a4cf60b86575a4a65ca8/msgpack-1.1.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:454e29e186285d2ebe65be34629fa0e8605202c60fbc7c4c650ccd41870896ef", size = 426183, upload-time = "2025-10-08T09:14:53.477Z" }, + { url = "https://files.pythonhosted.org/packages/25/98/6a19f030b3d2ea906696cedd1eb251708e50a5891d0978b012cb6107234c/msgpack-1.1.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7bc8813f88417599564fafa59fd6f95be417179f76b40325b500b3c98409757c", size = 411454, upload-time = "2025-10-08T09:14:54.648Z" }, + { url = "https://files.pythonhosted.org/packages/b7/cd/9098fcb6adb32187a70b7ecaabf6339da50553351558f37600e53a4a2a23/msgpack-1.1.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bafca952dc13907bdfdedfc6a5f579bf4f292bdd506fadb38389afa3ac5b208e", size = 422341, upload-time = "2025-10-08T09:14:56.328Z" }, + { url = "https://files.pythonhosted.org/packages/e6/ae/270cecbcf36c1dc85ec086b33a51a4d7d08fc4f404bdbc15b582255d05ff/msgpack-1.1.2-cp311-cp311-win32.whl", hash = "sha256:602b6740e95ffc55bfb078172d279de3773d7b7db1f703b2f1323566b878b90e", size = 64747, upload-time = "2025-10-08T09:14:57.882Z" }, + { url = "https://files.pythonhosted.org/packages/2a/79/309d0e637f6f37e83c711f547308b91af02b72d2326ddd860b966080ef29/msgpack-1.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:d198d275222dc54244bf3327eb8cbe00307d220241d9cec4d306d49a44e85f68", size = 71633, upload-time = "2025-10-08T09:14:59.177Z" }, + { url = "https://files.pythonhosted.org/packages/73/4d/7c4e2b3d9b1106cd0aa6cb56cc57c6267f59fa8bfab7d91df5adc802c847/msgpack-1.1.2-cp311-cp311-win_arm64.whl", hash = "sha256:86f8136dfa5c116365a8a651a7d7484b65b13339731dd6faebb9a0242151c406", size = 64755, upload-time = "2025-10-08T09:15:00.48Z" }, + { url = 
"https://files.pythonhosted.org/packages/ad/bd/8b0d01c756203fbab65d265859749860682ccd2a59594609aeec3a144efa/msgpack-1.1.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:70a0dff9d1f8da25179ffcf880e10cf1aad55fdb63cd59c9a49a1b82290062aa", size = 81939, upload-time = "2025-10-08T09:15:01.472Z" }, + { url = "https://files.pythonhosted.org/packages/34/68/ba4f155f793a74c1483d4bdef136e1023f7bcba557f0db4ef3db3c665cf1/msgpack-1.1.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:446abdd8b94b55c800ac34b102dffd2f6aa0ce643c55dfc017ad89347db3dbdb", size = 85064, upload-time = "2025-10-08T09:15:03.764Z" }, + { url = "https://files.pythonhosted.org/packages/f2/60/a064b0345fc36c4c3d2c743c82d9100c40388d77f0b48b2f04d6041dbec1/msgpack-1.1.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c63eea553c69ab05b6747901b97d620bb2a690633c77f23feb0c6a947a8a7b8f", size = 417131, upload-time = "2025-10-08T09:15:05.136Z" }, + { url = "https://files.pythonhosted.org/packages/65/92/a5100f7185a800a5d29f8d14041f61475b9de465ffcc0f3b9fba606e4505/msgpack-1.1.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:372839311ccf6bdaf39b00b61288e0557916c3729529b301c52c2d88842add42", size = 427556, upload-time = "2025-10-08T09:15:06.837Z" }, + { url = "https://files.pythonhosted.org/packages/f5/87/ffe21d1bf7d9991354ad93949286f643b2bb6ddbeab66373922b44c3b8cc/msgpack-1.1.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2929af52106ca73fcb28576218476ffbb531a036c2adbcf54a3664de124303e9", size = 404920, upload-time = "2025-10-08T09:15:08.179Z" }, + { url = "https://files.pythonhosted.org/packages/ff/41/8543ed2b8604f7c0d89ce066f42007faac1eaa7d79a81555f206a5cdb889/msgpack-1.1.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:be52a8fc79e45b0364210eef5234a7cf8d330836d0a64dfbb878efa903d84620", size = 415013, upload-time = "2025-10-08T09:15:09.83Z" }, + { url = "https://files.pythonhosted.org/packages/41/0d/2ddfaa8b7e1cee6c490d46cb0a39742b19e2481600a7a0e96537e9c22f43/msgpack-1.1.2-cp312-cp312-win32.whl", hash = "sha256:1fff3d825d7859ac888b0fbda39a42d59193543920eda9d9bea44d958a878029", size = 65096, upload-time = "2025-10-08T09:15:11.11Z" }, + { url = "https://files.pythonhosted.org/packages/8c/ec/d431eb7941fb55a31dd6ca3404d41fbb52d99172df2e7707754488390910/msgpack-1.1.2-cp312-cp312-win_amd64.whl", hash = "sha256:1de460f0403172cff81169a30b9a92b260cb809c4cb7e2fc79ae8d0510c78b6b", size = 72708, upload-time = "2025-10-08T09:15:12.554Z" }, + { url = "https://files.pythonhosted.org/packages/c5/31/5b1a1f70eb0e87d1678e9624908f86317787b536060641d6798e3cf70ace/msgpack-1.1.2-cp312-cp312-win_arm64.whl", hash = "sha256:be5980f3ee0e6bd44f3a9e9dea01054f175b50c3e6cdb692bc9424c0bbb8bf69", size = 64119, upload-time = "2025-10-08T09:15:13.589Z" }, + { url = "https://files.pythonhosted.org/packages/6b/31/b46518ecc604d7edf3a4f94cb3bf021fc62aa301f0cb849936968164ef23/msgpack-1.1.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4efd7b5979ccb539c221a4c4e16aac1a533efc97f3b759bb5a5ac9f6d10383bf", size = 81212, upload-time = "2025-10-08T09:15:14.552Z" }, + { url = "https://files.pythonhosted.org/packages/92/dc/c385f38f2c2433333345a82926c6bfa5ecfff3ef787201614317b58dd8be/msgpack-1.1.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:42eefe2c3e2af97ed470eec850facbe1b5ad1d6eacdbadc42ec98e7dcf68b4b7", size = 84315, upload-time = "2025-10-08T09:15:15.543Z" }, + { url = 
"https://files.pythonhosted.org/packages/d3/68/93180dce57f684a61a88a45ed13047558ded2be46f03acb8dec6d7c513af/msgpack-1.1.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1fdf7d83102bf09e7ce3357de96c59b627395352a4024f6e2458501f158bf999", size = 412721, upload-time = "2025-10-08T09:15:16.567Z" }, + { url = "https://files.pythonhosted.org/packages/5d/ba/459f18c16f2b3fc1a1ca871f72f07d70c07bf768ad0a507a698b8052ac58/msgpack-1.1.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fac4be746328f90caa3cd4bc67e6fe36ca2bf61d5c6eb6d895b6527e3f05071e", size = 424657, upload-time = "2025-10-08T09:15:17.825Z" }, + { url = "https://files.pythonhosted.org/packages/38/f8/4398c46863b093252fe67368b44edc6c13b17f4e6b0e4929dbf0bdb13f23/msgpack-1.1.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:fffee09044073e69f2bad787071aeec727183e7580443dfeb8556cbf1978d162", size = 402668, upload-time = "2025-10-08T09:15:19.003Z" }, + { url = "https://files.pythonhosted.org/packages/28/ce/698c1eff75626e4124b4d78e21cca0b4cc90043afb80a507626ea354ab52/msgpack-1.1.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5928604de9b032bc17f5099496417f113c45bc6bc21b5c6920caf34b3c428794", size = 419040, upload-time = "2025-10-08T09:15:20.183Z" }, + { url = "https://files.pythonhosted.org/packages/67/32/f3cd1667028424fa7001d82e10ee35386eea1408b93d399b09fb0aa7875f/msgpack-1.1.2-cp313-cp313-win32.whl", hash = "sha256:a7787d353595c7c7e145e2331abf8b7ff1e6673a6b974ded96e6d4ec09f00c8c", size = 65037, upload-time = "2025-10-08T09:15:21.416Z" }, + { url = "https://files.pythonhosted.org/packages/74/07/1ed8277f8653c40ebc65985180b007879f6a836c525b3885dcc6448ae6cb/msgpack-1.1.2-cp313-cp313-win_amd64.whl", hash = "sha256:a465f0dceb8e13a487e54c07d04ae3ba131c7c5b95e2612596eafde1dccf64a9", size = 72631, upload-time = "2025-10-08T09:15:22.431Z" }, + { url = "https://files.pythonhosted.org/packages/e5/db/0314e4e2db56ebcf450f277904ffd84a7988b9e5da8d0d61ab2d057df2b6/msgpack-1.1.2-cp313-cp313-win_arm64.whl", hash = "sha256:e69b39f8c0aa5ec24b57737ebee40be647035158f14ed4b40e6f150077e21a84", size = 64118, upload-time = "2025-10-08T09:15:23.402Z" }, + { url = "https://files.pythonhosted.org/packages/22/71/201105712d0a2ff07b7873ed3c220292fb2ea5120603c00c4b634bcdafb3/msgpack-1.1.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:e23ce8d5f7aa6ea6d2a2b326b4ba46c985dbb204523759984430db7114f8aa00", size = 81127, upload-time = "2025-10-08T09:15:24.408Z" }, + { url = "https://files.pythonhosted.org/packages/1b/9f/38ff9e57a2eade7bf9dfee5eae17f39fc0e998658050279cbb14d97d36d9/msgpack-1.1.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:6c15b7d74c939ebe620dd8e559384be806204d73b4f9356320632d783d1f7939", size = 84981, upload-time = "2025-10-08T09:15:25.812Z" }, + { url = "https://files.pythonhosted.org/packages/8e/a9/3536e385167b88c2cc8f4424c49e28d49a6fc35206d4a8060f136e71f94c/msgpack-1.1.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:99e2cb7b9031568a2a5c73aa077180f93dd2e95b4f8d3b8e14a73ae94a9e667e", size = 411885, upload-time = "2025-10-08T09:15:27.22Z" }, + { url = "https://files.pythonhosted.org/packages/2f/40/dc34d1a8d5f1e51fc64640b62b191684da52ca469da9cd74e84936ffa4a6/msgpack-1.1.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:180759d89a057eab503cf62eeec0aa61c4ea1200dee709f3a8e9397dbb3b6931", size = 419658, upload-time = "2025-10-08T09:15:28.4Z" 
}, + { url = "https://files.pythonhosted.org/packages/3b/ef/2b92e286366500a09a67e03496ee8b8ba00562797a52f3c117aa2b29514b/msgpack-1.1.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:04fb995247a6e83830b62f0b07bf36540c213f6eac8e851166d8d86d83cbd014", size = 403290, upload-time = "2025-10-08T09:15:29.764Z" }, + { url = "https://files.pythonhosted.org/packages/78/90/e0ea7990abea5764e4655b8177aa7c63cdfa89945b6e7641055800f6c16b/msgpack-1.1.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:8e22ab046fa7ede9e36eeb4cfad44d46450f37bb05d5ec482b02868f451c95e2", size = 415234, upload-time = "2025-10-08T09:15:31.022Z" }, + { url = "https://files.pythonhosted.org/packages/72/4e/9390aed5db983a2310818cd7d3ec0aecad45e1f7007e0cda79c79507bb0d/msgpack-1.1.2-cp314-cp314-win32.whl", hash = "sha256:80a0ff7d4abf5fecb995fcf235d4064b9a9a8a40a3ab80999e6ac1e30b702717", size = 66391, upload-time = "2025-10-08T09:15:32.265Z" }, + { url = "https://files.pythonhosted.org/packages/6e/f1/abd09c2ae91228c5f3998dbd7f41353def9eac64253de3c8105efa2082f7/msgpack-1.1.2-cp314-cp314-win_amd64.whl", hash = "sha256:9ade919fac6a3e7260b7f64cea89df6bec59104987cbea34d34a2fa15d74310b", size = 73787, upload-time = "2025-10-08T09:15:33.219Z" }, + { url = "https://files.pythonhosted.org/packages/6a/b0/9d9f667ab48b16ad4115c1935d94023b82b3198064cb84a123e97f7466c1/msgpack-1.1.2-cp314-cp314-win_arm64.whl", hash = "sha256:59415c6076b1e30e563eb732e23b994a61c159cec44deaf584e5cc1dd662f2af", size = 66453, upload-time = "2025-10-08T09:15:34.225Z" }, + { url = "https://files.pythonhosted.org/packages/16/67/93f80545eb1792b61a217fa7f06d5e5cb9e0055bed867f43e2b8e012e137/msgpack-1.1.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:897c478140877e5307760b0ea66e0932738879e7aa68144d9b78ea4c8302a84a", size = 85264, upload-time = "2025-10-08T09:15:35.61Z" }, + { url = "https://files.pythonhosted.org/packages/87/1c/33c8a24959cf193966ef11a6f6a2995a65eb066bd681fd085afd519a57ce/msgpack-1.1.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a668204fa43e6d02f89dbe79a30b0d67238d9ec4c5bd8a940fc3a004a47b721b", size = 89076, upload-time = "2025-10-08T09:15:36.619Z" }, + { url = "https://files.pythonhosted.org/packages/fc/6b/62e85ff7193663fbea5c0254ef32f0c77134b4059f8da89b958beb7696f3/msgpack-1.1.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5559d03930d3aa0f3aacb4c42c776af1a2ace2611871c84a75afe436695e6245", size = 435242, upload-time = "2025-10-08T09:15:37.647Z" }, + { url = "https://files.pythonhosted.org/packages/c1/47/5c74ecb4cc277cf09f64e913947871682ffa82b3b93c8dad68083112f412/msgpack-1.1.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:70c5a7a9fea7f036b716191c29047374c10721c389c21e9ffafad04df8c52c90", size = 432509, upload-time = "2025-10-08T09:15:38.794Z" }, + { url = "https://files.pythonhosted.org/packages/24/a4/e98ccdb56dc4e98c929a3f150de1799831c0a800583cde9fa022fa90602d/msgpack-1.1.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:f2cb069d8b981abc72b41aea1c580ce92d57c673ec61af4c500153a626cb9e20", size = 415957, upload-time = "2025-10-08T09:15:40.238Z" }, + { url = "https://files.pythonhosted.org/packages/da/28/6951f7fb67bc0a4e184a6b38ab71a92d9ba58080b27a77d3e2fb0be5998f/msgpack-1.1.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d62ce1f483f355f61adb5433ebfd8868c5f078d1a52d042b0a998682b4fa8c27", size = 422910, upload-time = "2025-10-08T09:15:41.505Z" }, + { url = 
"https://files.pythonhosted.org/packages/f0/03/42106dcded51f0a0b5284d3ce30a671e7bd3f7318d122b2ead66ad289fed/msgpack-1.1.2-cp314-cp314t-win32.whl", hash = "sha256:1d1418482b1ee984625d88aa9585db570180c286d942da463533b238b98b812b", size = 75197, upload-time = "2025-10-08T09:15:42.954Z" }, + { url = "https://files.pythonhosted.org/packages/15/86/d0071e94987f8db59d4eeb386ddc64d0bb9b10820a8d82bcd3e53eeb2da6/msgpack-1.1.2-cp314-cp314t-win_amd64.whl", hash = "sha256:5a46bf7e831d09470ad92dff02b8b1ac92175ca36b087f904a0519857c6be3ff", size = 85772, upload-time = "2025-10-08T09:15:43.954Z" }, + { url = "https://files.pythonhosted.org/packages/81/f2/08ace4142eb281c12701fc3b93a10795e4d4dc7f753911d836675050f886/msgpack-1.1.2-cp314-cp314t-win_arm64.whl", hash = "sha256:d99ef64f349d5ec3293688e91486c5fdb925ed03807f64d98d205d2713c60b46", size = 70868, upload-time = "2025-10-08T09:15:44.959Z" }, +] + [[package]] name = "multidict" version = "6.7.0" @@ -1410,7 +1480,8 @@ dev = [ requires-dist = [ { name = "boto3-stubs", marker = "extra == 'dev'", specifier = ">=1.35.24" }, { name = "mypy", marker = "extra == 'dev'", specifier = ">=1.11.2" }, - { name = "pipelex", extras = ["mistralai", "anthropic", "google", "google-genai", "bedrock", "fal"], editable = "../pipelex" }, + { name = "pipelex", git = "https://github.com/Pipelex/pipelex.git?branch=feature%2FChicago" }, + { name = "pipelex", extras = ["mistralai", "anthropic", "google", "google-genai", "bedrock", "fal"], git = "https://github.com/Pipelex/pipelex.git?branch=feature%2FChicago" }, { name = "pyright", marker = "extra == 'dev'", specifier = ">=1.1.405" }, { name = "pytest", marker = "extra == 'dev'", specifier = ">=9.0.1" }, { name = "pytest-asyncio", marker = "extra == 'dev'", specifier = ">=0.24.0" }, @@ -1891,7 +1962,7 @@ wheels = [ [[package]] name = "pipelex" version = "0.17.3" -source = { editable = "../pipelex" } +source = { git = "https://github.com/Pipelex/pipelex.git?branch=feature%2FChicago#88f1cefb1901f0759e91b06a2ba2f580291a29f2" } dependencies = [ { name = "aiofiles" }, { name = "backports-strenum", marker = "python_full_version < '3.11'" }, @@ -1947,69 +2018,6 @@ mistralai = [ { name = "mistralai" }, ] -[package.metadata] -requires-dist = [ - { name = "aioboto3", marker = "extra == 'bedrock'", specifier = ">=13.4.0" }, - { name = "aiofiles", specifier = ">=23.2.1" }, - { name = "anthropic", marker = "extra == 'anthropic'", specifier = ">=0.60.0" }, - { name = "backports-strenum", marker = "python_full_version < '3.11'", specifier = ">=1.3.0" }, - { name = "boto3", marker = "extra == 'bedrock'", specifier = ">=1.34.131" }, - { name = "boto3-stubs", marker = "extra == 'dev'", specifier = ">=1.35.24" }, - { name = "docling", marker = "extra == 'docling'", specifier = ">=2.64.0" }, - { name = "fal-client", marker = "extra == 'fal'", specifier = ">=0.4.1" }, - { name = "filetype", specifier = ">=1.2.0" }, - { name = "google-auth-oauthlib", marker = "extra == 'google'", specifier = ">=1.2.1" }, - { name = "google-genai", marker = "extra == 'google-genai'" }, - { name = "httpx", specifier = ">=0.23.0,<1.0.0" }, - { name = "instructor", specifier = ">=1.8.3,!=1.11.*,!=1.12.*" }, - { name = "instructor", extras = ["google-genai"], marker = "extra == 'google-genai'" }, - { name = "jinja2", specifier = ">=3.1.4" }, - { name = "json2html", specifier = ">=1.3.0" }, - { name = "kajson", specifier = "==0.3.1" }, - { name = "markdown", specifier = ">=3.6" }, - { name = "mistralai", marker = "extra == 'mistralai'", specifier = "==1.5.2" }, - { name = 
"mkdocs", marker = "extra == 'docs'", specifier = "==1.6.1" }, - { name = "mkdocs-glightbox", marker = "extra == 'docs'", specifier = "==0.4.0" }, - { name = "mkdocs-material", marker = "extra == 'docs'", specifier = "==9.6.14" }, - { name = "mkdocs-meta-manager", marker = "extra == 'docs'", specifier = "==1.1.0" }, - { name = "mypy", marker = "extra == 'dev'", specifier = ">=1.11.2" }, - { name = "networkx", specifier = ">=3.4.2" }, - { name = "openai", specifier = ">=1.108.1" }, - { name = "opentelemetry-api" }, - { name = "opentelemetry-exporter-otlp-proto-http" }, - { name = "opentelemetry-sdk" }, - { name = "opentelemetry-semantic-conventions" }, - { name = "pillow", specifier = ">=11.2.1" }, - { name = "polyfactory", specifier = ">=2.21.0" }, - { name = "portkey-ai", specifier = ">=2.1.0" }, - { name = "posthog", specifier = ">=6.7.0" }, - { name = "pydantic", specifier = ">=2.10.6,<3.0.0" }, - { name = "pylint", marker = "extra == 'dev'", specifier = ">=3.3.8" }, - { name = "pypdfium2", specifier = ">=4.30.0,!=4.30.1" }, - { name = "pyright", marker = "extra == 'dev'", specifier = ">=1.1.405" }, - { name = "pytest", marker = "extra == 'dev'", specifier = ">=9.0.1" }, - { name = "pytest-asyncio", marker = "extra == 'dev'", specifier = ">=0.24.0" }, - { name = "pytest-cov", marker = "extra == 'dev'", specifier = ">=6.1.1" }, - { name = "pytest-mock", marker = "extra == 'dev'", specifier = ">=3.14.0" }, - { name = "pytest-sugar", marker = "extra == 'dev'", specifier = ">=1.0.0" }, - { name = "pytest-xdist", marker = "extra == 'dev'", specifier = ">=3.6.1" }, - { name = "python-dotenv", specifier = ">=1.0.1" }, - { name = "pyyaml", specifier = ">=6.0.2" }, - { name = "rich", specifier = ">=13.8.1" }, - { name = "ruff", marker = "extra == 'dev'", specifier = ">=0.6.8" }, - { name = "shortuuid", specifier = ">=1.0.13" }, - { name = "tomli", specifier = ">=2.3.0" }, - { name = "tomlkit", specifier = ">=0.13.2" }, - { name = "typer", specifier = ">=0.16.0" }, - { name = "types-aioboto3", extras = ["bedrock", "bedrock-runtime"], marker = "extra == 'dev'", specifier = ">=13.4.0" }, - { name = "types-aiofiles", marker = "extra == 'dev'", specifier = ">=24.1.0.20240626" }, - { name = "types-markdown", marker = "extra == 'dev'", specifier = ">=3.6.0.20240316" }, - { name = "types-networkx", marker = "extra == 'dev'", specifier = ">=3.3.0.20241020" }, - { name = "types-pyyaml", marker = "extra == 'dev'", specifier = ">=6.0.12.20250326" }, - { name = "typing-extensions", specifier = ">=4.13.2" }, -] -provides-extras = ["anthropic", "bedrock", "docling", "fal", "google", "google-genai", "mistralai", "docs", "dev"] - [[package]] name = "platformdirs" version = "4.5.0" From 92be946afd2502858429a84657d036f329fa5b32 Mon Sep 17 00:00:00 2001 From: Louis Choquel Date: Mon, 19 Jan 2026 11:22:53 +0100 Subject: [PATCH 03/10] Update agent rules. Cleanup config files and test pipeline. 
--- .blackboxrules | 233 ++++++---- .cursor/rules/run_pipelex.mdc | 6 +- .cursor/rules/write_pipelex.mdc | 149 ++++++- .github/copilot-instructions.md | 233 ++++++---- .pipelex/inference/backends.toml | 109 ----- .pipelex/inference/backends/anthropic.toml | 100 ----- .pipelex/inference/backends/azure_openai.toml | 213 ---------- .pipelex/inference/backends/bedrock.toml | 120 ------ .pipelex/inference/backends/blackboxai.toml | 240 ----------- .pipelex/inference/backends/fal.toml | 107 ----- .pipelex/inference/backends/google.toml | 94 ----- .pipelex/inference/backends/groq.toml | 129 ------ .pipelex/inference/backends/huggingface.toml | 43 -- .pipelex/inference/backends/internal.toml | 37 -- .pipelex/inference/backends/mistral.toml | 164 -------- .pipelex/inference/backends/ollama.toml | 63 --- .pipelex/inference/backends/openai.toml | 208 --------- .../inference/backends/pipelex_gateway.toml | 41 -- .../inference/backends/pipelex_inference.toml | 205 --------- .pipelex/inference/backends/portkey.toml | 263 ------------ .pipelex/inference/backends/scaleway.toml | 67 --- .pipelex/inference/backends/vertexai.toml | 54 --- .pipelex/inference/backends/xai.toml | 56 --- .pipelex/inference/deck/base_deck.toml | 202 --------- .pipelex/inference/deck/overrides.toml | 19 - .pipelex/inference/routing_profiles.toml | 173 -------- .pipelex/pipelex.toml | 162 ------- .pipelex/pipelex_service.toml | 19 - .pipelex/telemetry.toml | 92 ---- .windsurfrules.md | 233 ++++++---- AGENTS.md | 233 ++++++---- CLAUDE.md | 233 ++++++---- crazy/__init__.py | 0 crazy/bundle.plx | 42 -- crazy/bundle_view.html | 111 ----- crazy/bundle_view.svg | 397 ------------------ crazy/inputs.json | 1 - crazy/run_generate_crazy_image.py | 19 - crazy/structures/__init__.py | 0 .../crazy_image_generation_ImagePrompt.py | 22 - 40 files changed, 870 insertions(+), 4022 deletions(-) delete mode 100644 .pipelex/inference/backends.toml delete mode 100644 .pipelex/inference/backends/anthropic.toml delete mode 100644 .pipelex/inference/backends/azure_openai.toml delete mode 100644 .pipelex/inference/backends/bedrock.toml delete mode 100644 .pipelex/inference/backends/blackboxai.toml delete mode 100644 .pipelex/inference/backends/fal.toml delete mode 100644 .pipelex/inference/backends/google.toml delete mode 100644 .pipelex/inference/backends/groq.toml delete mode 100644 .pipelex/inference/backends/huggingface.toml delete mode 100644 .pipelex/inference/backends/internal.toml delete mode 100644 .pipelex/inference/backends/mistral.toml delete mode 100644 .pipelex/inference/backends/ollama.toml delete mode 100644 .pipelex/inference/backends/openai.toml delete mode 100644 .pipelex/inference/backends/pipelex_gateway.toml delete mode 100644 .pipelex/inference/backends/pipelex_inference.toml delete mode 100644 .pipelex/inference/backends/portkey.toml delete mode 100644 .pipelex/inference/backends/scaleway.toml delete mode 100644 .pipelex/inference/backends/vertexai.toml delete mode 100644 .pipelex/inference/backends/xai.toml delete mode 100644 .pipelex/inference/deck/base_deck.toml delete mode 100644 .pipelex/inference/deck/overrides.toml delete mode 100644 .pipelex/inference/routing_profiles.toml delete mode 100644 .pipelex/pipelex.toml delete mode 100644 .pipelex/pipelex_service.toml delete mode 100644 .pipelex/telemetry.toml delete mode 100644 crazy/__init__.py delete mode 100644 crazy/bundle.plx delete mode 100644 crazy/bundle_view.html delete mode 100644 crazy/bundle_view.svg delete mode 100644 crazy/inputs.json delete mode 100644 
crazy/run_generate_crazy_image.py delete mode 100644 crazy/structures/__init__.py delete mode 100644 crazy/structures/crazy_image_generation_ImagePrompt.py diff --git a/.blackboxrules b/.blackboxrules index 6125caa..af4572e 100644 --- a/.blackboxrules +++ b/.blackboxrules @@ -23,10 +23,10 @@ A pipeline file has three main sections: #### Domain Statement ```plx -domain = "domain_name" +domain = "domain_code" description = "Description of the domain" # Optional ``` -Note: The domain name usually matches the plx filename for single-file domains. For multi-file domains, use the subdirectory name. +Note: The domain code usually matches the plx filename for single-file domains. For multi-file domains, use the subdirectory name. #### Concept Definitions @@ -62,7 +62,7 @@ For details on how to structure concepts with fields, see the "Structuring Model ### Pipe Base Definition ```plx -[pipe.your_pipe_name] +[pipe.your_pipe_code] type = "PipeLLM" description = "A description of what your pipe does" inputs = { input_1 = "ConceptName1", input_2 = "ConceptName2" } @@ -471,7 +471,7 @@ The PipeExtract operator is used to extract text and images from an image or a P [pipe.extract_info] type = "PipeExtract" description = "extract the information" -inputs = { document = "PDF" } # or { image = "Image" } if it's an image. This is the only input. +inputs = { document = "Document" } # or { image = "Image" } if it's an image. This is the only input. output = "Page" ``` @@ -480,7 +480,7 @@ Using Extract Model Settings: [pipe.extract_with_model] type = "PipeExtract" description = "Extract with specific model" -inputs = { document = "PDF" } +inputs = { document = "Document" } output = "Page" model = "base_extract_mistral" # Use predefined extract preset or model alias ``` @@ -588,15 +588,16 @@ $sales_rep.phone | $sales_rep.email """ ``` -#### Key Parameters +#### Key Parameters (Template Mode) -- `template`: Inline template string (mutually exclusive with template_name) +- `template`: Inline template string (mutually exclusive with template_name and construct) - `template_name`: Name of a predefined template (mutually exclusive with template) - `template_category`: Template type ("llm_prompt", "html", "markdown", "mermaid", etc.) - `templating_style`: Styling options for template rendering - `extra_context`: Additional context variables for template For more control, you can use a nested `template` section instead of the `template` field: + - `template.template`: The template string - `template.category`: Template type - `template.templating_style`: Styling options @@ -604,9 +605,143 @@ For more control, you can use a nested `template` section instead of the `templa #### Template Variables Use the same variable insertion rules as PipeLLM: + - `@variable` for block insertion (multi-line content) - `$variable` for inline insertion (short text) +#### Construct Mode (for StructuredContent Output) + +PipeCompose can also generate `StructuredContent` objects using the `construct` section. This mode composes field values from fixed values, variable references, templates, or nested structures. 
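+Before the details, here is a minimal Python sketch of how a construct-mode pipe might be run and its output read. This is illustrative only: the import paths, the dict-style input for the custom `Deal` concept, and the `main_stuff` accessor are assumptions (inferred from the `main_stuff_as_str` and `main_stuff_as_items` helpers used elsewhere in these rules); the `compose_summary` pipe itself is defined in the examples below.
+
+```python
+import asyncio
+
+from pipelex import Pipelex, pretty_print  # import paths assumed for this sketch
+from pipelex.pipeline.execute import execute_pipeline  # assumed import path
+
+
+async def run_compose_summary():
+    # Run the construct-mode pipe defined in "Basic Construct Usage" below;
+    # passing a plain dict for the custom "Deal" concept is an assumption here.
+    pipe_output = await execute_pipeline(
+        pipe_code="compose_summary",
+        inputs={"deal": {"customer_name": "Acme Corp", "amount": 12500}},
+    )
+    # Read back the composed StructuredContent object (accessor name assumed)
+    summary = pipe_output.main_stuff
+    pretty_print(summary, title="Composed sales summary")
+
+
+with Pipelex.make():
+    asyncio.run(run_compose_summary())
+```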
+ +**When to use construct mode:** + +- You need to output a structured object (not just Text) +- You want to deterministically compose fields from existing data +- No LLM is needed - just data composition and templating + +##### Basic Construct Usage + +```plx +[concept.SalesSummary] +description = "A structured sales summary" + +[concept.SalesSummary.structure] +report_title = { type = "text", description = "Title of the report" } +customer_name = { type = "text", description = "Customer name" } +deal_value = { type = "number", description = "Deal value" } +summary_text = { type = "text", description = "Generated summary text" } + +[pipe.compose_summary] +type = "PipeCompose" +description = "Compose a sales summary from deal data" +inputs = { deal = "Deal" } +output = "SalesSummary" + +[pipe.compose_summary.construct] +report_title = "Monthly Sales Report" +customer_name = { from = "deal.customer_name" } +deal_value = { from = "deal.amount" } +summary_text = { template = "Deal worth $deal.amount with $deal.customer_name" } +``` + +##### Field Composition Methods + +There are four ways to define field values in a construct: + +**1. Fixed Value (literal)** + +Use a literal value directly: + +```plx +[pipe.compose_report.construct] +report_title = "Annual Report" +report_year = 2024 +is_draft = false +``` + +**2. Variable Reference (`from`)** + +Get a value from working memory using a dotted path: + +```plx +[pipe.compose_report.construct] +customer_name = { from = "deal.customer_name" } +total_amount = { from = "order.total" } +street_address = { from = "customer.address.street" } +``` + +**3. Template (`template`)** + +Render a Jinja2 template with variable substitution: + +```plx +[pipe.compose_report.construct] +invoice_number = { template = "INV-$order.id" } +summary = { template = "Deal worth $deal.amount with $deal.customer_name on {{ current_date }}" } +``` + +**4. 
Nested Construct** + +For nested structures, use a TOML subsection: + +```plx +[pipe.compose_invoice.construct] +invoice_number = { template = "INV-$order.id" } +total = { from = "order.total_amount" } + +[pipe.compose_invoice.construct.billing_address] +street = { from = "customer.address.street" } +city = { from = "customer.address.city" } +country = "France" +``` + +##### Complete Construct Example + +```plx +domain = "invoicing" + +[concept.Address] +description = "A postal address" + +[concept.Address.structure] +street = { type = "text", description = "Street address" } +city = { type = "text", description = "City name" } +country = { type = "text", description = "Country name" } + +[concept.Invoice] +description = "An invoice document" + +[concept.Invoice.structure] +invoice_number = { type = "text", description = "Invoice number" } +total = { type = "number", description = "Total amount" } + +[pipe.compose_invoice] +type = "PipeCompose" +description = "Compose an invoice from order and customer data" +inputs = { order = "Order", customer = "Customer" } +output = "Invoice" + +[pipe.compose_invoice.construct] +invoice_number = { template = "INV-$order.id" } +total = { from = "order.total_amount" } + +[pipe.compose_invoice.construct.billing_address] +street = { from = "customer.address.street" } +city = { from = "customer.address.city" } +country = "France" +``` + +##### Key Parameters (Construct Mode) + +- `construct`: Dictionary mapping field names to their composition rules +- Each field can be: + - A literal value (string, number, boolean) + - A dict with `from` key for variable reference + - A dict with `template` key for template rendering + - A nested dict for nested structures + +**Note:** You must use either `template` or `construct`, not both. They are mutually exclusive. + ### PipeImgGen operator The PipeImgGen operator is used to generate images using AI image generation models. @@ -952,13 +1087,13 @@ So here are a few concrete examples of calls to execute_pipeline with various wa }, ) -## Here we have a single input and it's a PDF. -## Because PDFContent is a native concept, we can use it directly as a value, +## Here we have a single input and it's a document. +## Because DocumentContent is a native concept, we can use it directly as a value, ## the system knows what content it corresponds to: pipe_output = await execute_pipeline( pipe_code="power_extractor_dpe", inputs={ - "document": PDFContent(url=pdf_url), + "document": DocumentContent(url=pdf_url), }, ) @@ -1081,82 +1216,4 @@ result_list = pipe_output.main_stuff_as_items(item_type=GanttChart) ``` --- - -## Rules to choose LLM models used in PipeLLMs. - -### LLM Configuration System - -In order to use it in a pipe, an LLM is referenced by its llm_handle (alias) and possibly by an llm_preset. -LLM configurations are managed through the new inference backend system with files located in `.pipelex/inference/`: - -- **Model Deck**: `.pipelex/inference/deck/base_deck.toml` and `.pipelex/inference/deck/overrides.toml` -- **Backends**: `.pipelex/inference/backends.toml` and `.pipelex/inference/backends/*.toml` -- **Routing**: `.pipelex/inference/routing_profiles.toml` - -### LLM Handles - -An llm_handle can be either: -1. **A direct model name** (like "gpt-4o-mini", "claude-3-sonnet") - automatically available for all models loaded by the inference backend system -2. 
**An alias** - user-defined shortcuts that map to model names, defined in the `[aliases]` section: - -```toml -[aliases] -base-claude = "claude-4.5-sonnet" -base-gpt = "gpt-5" -base-gemini = "gemini-2.5-flash" -base-mistral = "mistral-medium" -``` - -The system first looks for direct model names, then checks aliases if no direct match is found. The system handles model routing through backends automatically. - -### Using an LLM Handle in a PipeLLM - -Here is an example of using an llm_handle to specify which LLM to use in a PipeLLM: - -```plx -[pipe.hello_world] -type = "PipeLLM" -description = "Write text about Hello World." -output = "Text" -model = { model = "gpt-5", temperature = 0.9 } -prompt = """ -Write a haiku about Hello World. -""" -``` - -As you can see, to use the LLM, you must also indicate the temperature (float between 0 and 1) and max_tokens (either an int or the string "auto"). - -### LLM Presets - -Presets are meant to record the choice of an llm with its hyper parameters (temperature and max_tokens) if it's good for a particular task. LLM Presets are skill-oriented. - -Examples: -```toml -llm_to_engineer = { model = "base-claude", temperature = 1 } -llm_to_extract_invoice = { model = "claude-4.5-sonnet", temperature = 0.1, max_tokens = "auto" } -``` - -The interest is that these presets can be used to set the LLM choice in a PipeLLM, like this: - -```plx -[pipe.extract_invoice] -type = "PipeLLM" -description = "Extract invoice information from an invoice text transcript" -inputs = { invoice_text = "InvoiceText" } -output = "Invoice" -model = "llm_to_extract_invoice" -prompt = """ -Extract invoice information from this invoice: - -The category of this invoice is: $invoice_details.category. - -@invoice_text -""" -``` - -The setting here `model = "llm_to_extract_invoice"` works because "llm_to_extract_invoice" has been declared as an llm_preset in the deck. -You must not use an LLM preset in a PipeLLM that does not exist in the deck. If needed, you can add llm presets. - - -You can override the predefined llm presets by setting them in `.pipelex/inference/deck/overrides.toml`. diff --git a/.cursor/rules/run_pipelex.mdc b/.cursor/rules/run_pipelex.mdc index 8387838..7650051 100644 --- a/.cursor/rules/run_pipelex.mdc +++ b/.cursor/rules/run_pipelex.mdc @@ -99,13 +99,13 @@ So here are a few concrete examples of calls to execute_pipeline with various wa }, ) -# Here we have a single input and it's a PDF. -# Because PDFContent is a native concept, we can use it directly as a value, +# Here we have a single input and it's a document. +# Because DocumentContent is a native concept, we can use it directly as a value, # the system knows what content it corresponds to: pipe_output = await execute_pipeline( pipe_code="power_extractor_dpe", inputs={ - "document": PDFContent(url=pdf_url), + "document": DocumentContent(url=pdf_url), }, ) diff --git a/.cursor/rules/write_pipelex.mdc b/.cursor/rules/write_pipelex.mdc index 9f23faf..93422cc 100644 --- a/.cursor/rules/write_pipelex.mdc +++ b/.cursor/rules/write_pipelex.mdc @@ -27,10 +27,10 @@ A pipeline file has three main sections: ### Domain Statement ```plx -domain = "domain_name" +domain = "domain_code" description = "Description of the domain" # Optional ``` -Note: The domain name usually matches the plx filename for single-file domains. For multi-file domains, use the subdirectory name. +Note: The domain code usually matches the plx filename for single-file domains. For multi-file domains, use the subdirectory name. 
### Concept Definitions @@ -66,7 +66,7 @@ For details on how to structure concepts with fields, see the "Structuring Model ## Pipe Base Definition ```plx -[pipe.your_pipe_name] +[pipe.your_pipe_code] type = "PipeLLM" description = "A description of what your pipe does" inputs = { input_1 = "ConceptName1", input_2 = "ConceptName2" } @@ -475,7 +475,7 @@ The PipeExtract operator is used to extract text and images from an image or a P [pipe.extract_info] type = "PipeExtract" description = "extract the information" -inputs = { document = "PDF" } # or { image = "Image" } if it's an image. This is the only input. +inputs = { document = "Document" } # or { image = "Image" } if it's an image. This is the only input. output = "Page" ``` @@ -484,7 +484,7 @@ Using Extract Model Settings: [pipe.extract_with_model] type = "PipeExtract" description = "Extract with specific model" -inputs = { document = "PDF" } +inputs = { document = "Document" } output = "Page" model = "base_extract_mistral" # Use predefined extract preset or model alias ``` @@ -592,15 +592,16 @@ $sales_rep.phone | $sales_rep.email """ ``` -### Key Parameters +### Key Parameters (Template Mode) -- `template`: Inline template string (mutually exclusive with template_name) +- `template`: Inline template string (mutually exclusive with template_name and construct) - `template_name`: Name of a predefined template (mutually exclusive with template) - `template_category`: Template type ("llm_prompt", "html", "markdown", "mermaid", etc.) - `templating_style`: Styling options for template rendering - `extra_context`: Additional context variables for template For more control, you can use a nested `template` section instead of the `template` field: + - `template.template`: The template string - `template.category`: Template type - `template.templating_style`: Styling options @@ -608,9 +609,143 @@ For more control, you can use a nested `template` section instead of the `templa ### Template Variables Use the same variable insertion rules as PipeLLM: + - `@variable` for block insertion (multi-line content) - `$variable` for inline insertion (short text) +### Construct Mode (for StructuredContent Output) + +PipeCompose can also generate `StructuredContent` objects using the `construct` section. This mode composes field values from fixed values, variable references, templates, or nested structures. + +**When to use construct mode:** + +- You need to output a structured object (not just Text) +- You want to deterministically compose fields from existing data +- No LLM is needed - just data composition and templating + +#### Basic Construct Usage + +```plx +[concept.SalesSummary] +description = "A structured sales summary" + +[concept.SalesSummary.structure] +report_title = { type = "text", description = "Title of the report" } +customer_name = { type = "text", description = "Customer name" } +deal_value = { type = "number", description = "Deal value" } +summary_text = { type = "text", description = "Generated summary text" } + +[pipe.compose_summary] +type = "PipeCompose" +description = "Compose a sales summary from deal data" +inputs = { deal = "Deal" } +output = "SalesSummary" + +[pipe.compose_summary.construct] +report_title = "Monthly Sales Report" +customer_name = { from = "deal.customer_name" } +deal_value = { from = "deal.amount" } +summary_text = { template = "Deal worth $deal.amount with $deal.customer_name" } +``` + +#### Field Composition Methods + +There are four ways to define field values in a construct: + +**1. 
Fixed Value (literal)** + +Use a literal value directly: + +```plx +[pipe.compose_report.construct] +report_title = "Annual Report" +report_year = 2024 +is_draft = false +``` + +**2. Variable Reference (`from`)** + +Get a value from working memory using a dotted path: + +```plx +[pipe.compose_report.construct] +customer_name = { from = "deal.customer_name" } +total_amount = { from = "order.total" } +street_address = { from = "customer.address.street" } +``` + +**3. Template (`template`)** + +Render a Jinja2 template with variable substitution: + +```plx +[pipe.compose_report.construct] +invoice_number = { template = "INV-$order.id" } +summary = { template = "Deal worth $deal.amount with $deal.customer_name on {{ current_date }}" } +``` + +**4. Nested Construct** + +For nested structures, use a TOML subsection: + +```plx +[pipe.compose_invoice.construct] +invoice_number = { template = "INV-$order.id" } +total = { from = "order.total_amount" } + +[pipe.compose_invoice.construct.billing_address] +street = { from = "customer.address.street" } +city = { from = "customer.address.city" } +country = "France" +``` + +#### Complete Construct Example + +```plx +domain = "invoicing" + +[concept.Address] +description = "A postal address" + +[concept.Address.structure] +street = { type = "text", description = "Street address" } +city = { type = "text", description = "City name" } +country = { type = "text", description = "Country name" } + +[concept.Invoice] +description = "An invoice document" + +[concept.Invoice.structure] +invoice_number = { type = "text", description = "Invoice number" } +total = { type = "number", description = "Total amount" } + +[pipe.compose_invoice] +type = "PipeCompose" +description = "Compose an invoice from order and customer data" +inputs = { order = "Order", customer = "Customer" } +output = "Invoice" + +[pipe.compose_invoice.construct] +invoice_number = { template = "INV-$order.id" } +total = { from = "order.total_amount" } + +[pipe.compose_invoice.construct.billing_address] +street = { from = "customer.address.street" } +city = { from = "customer.address.city" } +country = "France" +``` + +#### Key Parameters (Construct Mode) + +- `construct`: Dictionary mapping field names to their composition rules +- Each field can be: + - A literal value (string, number, boolean) + - A dict with `from` key for variable reference + - A dict with `template` key for template rendering + - A nested dict for nested structures + +**Note:** You must use either `template` or `construct`, not both. They are mutually exclusive. + ## PipeImgGen operator The PipeImgGen operator is used to generate images using AI image generation models. diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index 6125caa..af4572e 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -23,10 +23,10 @@ A pipeline file has three main sections: #### Domain Statement ```plx -domain = "domain_name" +domain = "domain_code" description = "Description of the domain" # Optional ``` -Note: The domain name usually matches the plx filename for single-file domains. For multi-file domains, use the subdirectory name. +Note: The domain code usually matches the plx filename for single-file domains. For multi-file domains, use the subdirectory name. 
#### Concept Definitions @@ -62,7 +62,7 @@ For details on how to structure concepts with fields, see the "Structuring Model ### Pipe Base Definition ```plx -[pipe.your_pipe_name] +[pipe.your_pipe_code] type = "PipeLLM" description = "A description of what your pipe does" inputs = { input_1 = "ConceptName1", input_2 = "ConceptName2" } @@ -471,7 +471,7 @@ The PipeExtract operator is used to extract text and images from an image or a P [pipe.extract_info] type = "PipeExtract" description = "extract the information" -inputs = { document = "PDF" } # or { image = "Image" } if it's an image. This is the only input. +inputs = { document = "Document" } # or { image = "Image" } if it's an image. This is the only input. output = "Page" ``` @@ -480,7 +480,7 @@ Using Extract Model Settings: [pipe.extract_with_model] type = "PipeExtract" description = "Extract with specific model" -inputs = { document = "PDF" } +inputs = { document = "Document" } output = "Page" model = "base_extract_mistral" # Use predefined extract preset or model alias ``` @@ -588,15 +588,16 @@ $sales_rep.phone | $sales_rep.email """ ``` -#### Key Parameters +#### Key Parameters (Template Mode) -- `template`: Inline template string (mutually exclusive with template_name) +- `template`: Inline template string (mutually exclusive with template_name and construct) - `template_name`: Name of a predefined template (mutually exclusive with template) - `template_category`: Template type ("llm_prompt", "html", "markdown", "mermaid", etc.) - `templating_style`: Styling options for template rendering - `extra_context`: Additional context variables for template For more control, you can use a nested `template` section instead of the `template` field: + - `template.template`: The template string - `template.category`: Template type - `template.templating_style`: Styling options @@ -604,9 +605,143 @@ For more control, you can use a nested `template` section instead of the `templa #### Template Variables Use the same variable insertion rules as PipeLLM: + - `@variable` for block insertion (multi-line content) - `$variable` for inline insertion (short text) +#### Construct Mode (for StructuredContent Output) + +PipeCompose can also generate `StructuredContent` objects using the `construct` section. This mode composes field values from fixed values, variable references, templates, or nested structures. + +**When to use construct mode:** + +- You need to output a structured object (not just Text) +- You want to deterministically compose fields from existing data +- No LLM is needed - just data composition and templating + +##### Basic Construct Usage + +```plx +[concept.SalesSummary] +description = "A structured sales summary" + +[concept.SalesSummary.structure] +report_title = { type = "text", description = "Title of the report" } +customer_name = { type = "text", description = "Customer name" } +deal_value = { type = "number", description = "Deal value" } +summary_text = { type = "text", description = "Generated summary text" } + +[pipe.compose_summary] +type = "PipeCompose" +description = "Compose a sales summary from deal data" +inputs = { deal = "Deal" } +output = "SalesSummary" + +[pipe.compose_summary.construct] +report_title = "Monthly Sales Report" +customer_name = { from = "deal.customer_name" } +deal_value = { from = "deal.amount" } +summary_text = { template = "Deal worth $deal.amount with $deal.customer_name" } +``` + +##### Field Composition Methods + +There are four ways to define field values in a construct: + +**1. 
Fixed Value (literal)** + +Use a literal value directly: + +```plx +[pipe.compose_report.construct] +report_title = "Annual Report" +report_year = 2024 +is_draft = false +``` + +**2. Variable Reference (`from`)** + +Get a value from working memory using a dotted path: + +```plx +[pipe.compose_report.construct] +customer_name = { from = "deal.customer_name" } +total_amount = { from = "order.total" } +street_address = { from = "customer.address.street" } +``` + +**3. Template (`template`)** + +Render a Jinja2 template with variable substitution: + +```plx +[pipe.compose_report.construct] +invoice_number = { template = "INV-$order.id" } +summary = { template = "Deal worth $deal.amount with $deal.customer_name on {{ current_date }}" } +``` + +**4. Nested Construct** + +For nested structures, use a TOML subsection: + +```plx +[pipe.compose_invoice.construct] +invoice_number = { template = "INV-$order.id" } +total = { from = "order.total_amount" } + +[pipe.compose_invoice.construct.billing_address] +street = { from = "customer.address.street" } +city = { from = "customer.address.city" } +country = "France" +``` + +##### Complete Construct Example + +```plx +domain = "invoicing" + +[concept.Address] +description = "A postal address" + +[concept.Address.structure] +street = { type = "text", description = "Street address" } +city = { type = "text", description = "City name" } +country = { type = "text", description = "Country name" } + +[concept.Invoice] +description = "An invoice document" + +[concept.Invoice.structure] +invoice_number = { type = "text", description = "Invoice number" } +total = { type = "number", description = "Total amount" } + +[pipe.compose_invoice] +type = "PipeCompose" +description = "Compose an invoice from order and customer data" +inputs = { order = "Order", customer = "Customer" } +output = "Invoice" + +[pipe.compose_invoice.construct] +invoice_number = { template = "INV-$order.id" } +total = { from = "order.total_amount" } + +[pipe.compose_invoice.construct.billing_address] +street = { from = "customer.address.street" } +city = { from = "customer.address.city" } +country = "France" +``` + +##### Key Parameters (Construct Mode) + +- `construct`: Dictionary mapping field names to their composition rules +- Each field can be: + - A literal value (string, number, boolean) + - A dict with `from` key for variable reference + - A dict with `template` key for template rendering + - A nested dict for nested structures + +**Note:** You must use either `template` or `construct`, not both. They are mutually exclusive. + ### PipeImgGen operator The PipeImgGen operator is used to generate images using AI image generation models. @@ -952,13 +1087,13 @@ So here are a few concrete examples of calls to execute_pipeline with various wa }, ) -## Here we have a single input and it's a PDF. -## Because PDFContent is a native concept, we can use it directly as a value, +## Here we have a single input and it's a document. +## Because DocumentContent is a native concept, we can use it directly as a value, ## the system knows what content it corresponds to: pipe_output = await execute_pipeline( pipe_code="power_extractor_dpe", inputs={ - "document": PDFContent(url=pdf_url), + "document": DocumentContent(url=pdf_url), }, ) @@ -1081,82 +1216,4 @@ result_list = pipe_output.main_stuff_as_items(item_type=GanttChart) ``` --- - -## Rules to choose LLM models used in PipeLLMs. 
- -### LLM Configuration System - -In order to use it in a pipe, an LLM is referenced by its llm_handle (alias) and possibly by an llm_preset. -LLM configurations are managed through the new inference backend system with files located in `.pipelex/inference/`: - -- **Model Deck**: `.pipelex/inference/deck/base_deck.toml` and `.pipelex/inference/deck/overrides.toml` -- **Backends**: `.pipelex/inference/backends.toml` and `.pipelex/inference/backends/*.toml` -- **Routing**: `.pipelex/inference/routing_profiles.toml` - -### LLM Handles - -An llm_handle can be either: -1. **A direct model name** (like "gpt-4o-mini", "claude-3-sonnet") - automatically available for all models loaded by the inference backend system -2. **An alias** - user-defined shortcuts that map to model names, defined in the `[aliases]` section: - -```toml -[aliases] -base-claude = "claude-4.5-sonnet" -base-gpt = "gpt-5" -base-gemini = "gemini-2.5-flash" -base-mistral = "mistral-medium" -``` - -The system first looks for direct model names, then checks aliases if no direct match is found. The system handles model routing through backends automatically. - -### Using an LLM Handle in a PipeLLM - -Here is an example of using an llm_handle to specify which LLM to use in a PipeLLM: - -```plx -[pipe.hello_world] -type = "PipeLLM" -description = "Write text about Hello World." -output = "Text" -model = { model = "gpt-5", temperature = 0.9 } -prompt = """ -Write a haiku about Hello World. -""" -``` - -As you can see, to use the LLM, you must also indicate the temperature (float between 0 and 1) and max_tokens (either an int or the string "auto"). - -### LLM Presets - -Presets are meant to record the choice of an llm with its hyper parameters (temperature and max_tokens) if it's good for a particular task. LLM Presets are skill-oriented. - -Examples: -```toml -llm_to_engineer = { model = "base-claude", temperature = 1 } -llm_to_extract_invoice = { model = "claude-4.5-sonnet", temperature = 0.1, max_tokens = "auto" } -``` - -The interest is that these presets can be used to set the LLM choice in a PipeLLM, like this: - -```plx -[pipe.extract_invoice] -type = "PipeLLM" -description = "Extract invoice information from an invoice text transcript" -inputs = { invoice_text = "InvoiceText" } -output = "Invoice" -model = "llm_to_extract_invoice" -prompt = """ -Extract invoice information from this invoice: - -The category of this invoice is: $invoice_details.category. - -@invoice_text -""" -``` - -The setting here `model = "llm_to_extract_invoice"` works because "llm_to_extract_invoice" has been declared as an llm_preset in the deck. -You must not use an LLM preset in a PipeLLM that does not exist in the deck. If needed, you can add llm presets. - - -You can override the predefined llm presets by setting them in `.pipelex/inference/deck/overrides.toml`. diff --git a/.pipelex/inference/backends.toml b/.pipelex/inference/backends.toml deleted file mode 100644 index 46cbb79..0000000 --- a/.pipelex/inference/backends.toml +++ /dev/null @@ -1,109 +0,0 @@ -#################################################################################################### -# Pipelex Inference Backends Configuration -#################################################################################################### -# -# This file configures the inference backends available to Pipelex. -# Each backend connects to a different AI service provider (OpenAI, Anthropic, Google, etc.). 
-#
-# Documentation: https://docs.pipelex.com
-# Support: https://go.pipelex.com/discord
-#
-####################################################################################################
-
-[pipelex_gateway]
-display_name = "⭐ Pipelex Gateway"
-enabled = true # Enable after accepting terms via `pipelex init config`
-api_key = "${PIPELEX_GATEWAY_API_KEY}"
-
-[anthropic]
-enabled = false
-api_key = "${ANTHROPIC_API_KEY}"
-valued_constraints = { max_output_tokens_limit = 8192 }
-
-[azure_openai]
-display_name = "Azure OpenAI"
-enabled = false
-endpoint = "${AZURE_API_BASE}"
-api_key = "${AZURE_API_KEY}"
-api_version = "${AZURE_API_VERSION}"
-
-[bedrock]
-display_name = "Amazon Bedrock"
-enabled = false
-aws_region = "${AWS_REGION}"
-
-[blackboxai]
-display_name = "BlackBox AI"
-enabled = false
-endpoint = "https://api.blackbox.ai/v1"
-api_key = "${BLACKBOX_API_KEY}"
-
-[fal]
-display_name = "FAL"
-enabled = false
-api_key = "${FAL_API_KEY}"
-
-[google]
-display_name = "Google AI"
-enabled = false
-api_key = "${GOOGLE_API_KEY}"
-
-[groq]
-display_name = "Groq"
-enabled = false
-endpoint = "https://api.groq.com/openai/v1"
-api_key = "${GROQ_API_KEY}"
-
-[huggingface]
-display_name = "Hugging Face"
-enabled = false
-api_key = "${HF_TOKEN}"
-
-[mistral]
-display_name = "Mistral AI"
-enabled = false
-api_key = "${MISTRAL_API_KEY}"
-
-[ollama]
-enabled = false
-endpoint = "http://localhost:11434/v1"
-
-[openai]
-display_name = "OpenAI"
-enabled = false
-api_key = "${OPENAI_API_KEY}"
-
-[portkey]
-display_name = "Portkey"
-enabled = false
-endpoint = "https://api.portkey.ai/v1"
-api_key = "${PORTKEY_API_KEY}"
-
-[scaleway]
-display_name = "Scaleway"
-enabled = false
-endpoint = "${SCALEWAY_ENDPOINT}"
-api_key = "${SCALEWAY_API_KEY}"
-
-[vertexai]
-display_name = "Google Vertex AI"
-enabled = false # This is the only one we disable because setting it up requires internet access just to get credentials, so it fails in CI sandboxes
-gcp_project_id = "${GCP_PROJECT_ID}"
-gcp_location = "${GCP_LOCATION}"
-gcp_credentials_file_path = "${GCP_CREDENTIALS_FILE_PATH}"
-
-[xai]
-display_name = "xAI"
-enabled = false
-endpoint = "https://api.x.ai/v1"
-api_key = "${XAI_API_KEY}"
-
-[internal] # software-only backend, runs internally, without AI
-enabled = true
-
-# Deprecated
-[pipelex_inference]
-display_name = "🛑 Legacy Pipelex Inference"
-enabled = false
-endpoint = "https://inference.pipelex.com/v1"
-api_key = "${PIPELEX_INFERENCE_API_KEY}"
diff --git a/.pipelex/inference/backends/anthropic.toml b/.pipelex/inference/backends/anthropic.toml
deleted file mode 100644
index 0f04f4d..0000000
--- a/.pipelex/inference/backends/anthropic.toml
+++ /dev/null
@@ -1,100 +0,0 @@
-################################################################################
-# Anthropic Backend Configuration
-################################################################################
-#
-# This file defines the model specifications for Anthropic Claude models.
-# It contains model definitions for various Claude language models
-# accessible through the Anthropic API.
-# -# Configuration structure: -# - Each model is defined in its own section with the model name as the header -# - Headers with dots must be quoted (e.g., ["claude-3.5-sonnet"]) -# - Model costs are in USD per million tokens (input/output) -# -# Documentation: https://docs.pipelex.com -# Support: https://go.pipelex.com/discord -# -################################################################################ - -################################################################################ -# MODEL DEFAULTS -################################################################################ - -[defaults] -model_type = "llm" -sdk = "anthropic" -prompting_target = "anthropic" -structure_method = "instructor/anthropic_tools" - -################################################################################ -# LANGUAGE MODELS -################################################################################ - -# --- Claude 3 Series ---------------------------------------------------------- -[claude-3-haiku] -model_id = "claude-3-haiku-20240307" -max_tokens = 4096 -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 100 -costs = { input = 0.25, output = 1.25 } - -# --- Claude 3.7 Series -------------------------------------------------------- -["claude-3.7-sonnet"] -model_id = "claude-3-7-sonnet-20250219" -max_tokens = 8192 -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 100 -costs = { input = 3.0, output = 15.0 } - -# --- Claude 4 Series ---------------------------------------------------------- -[claude-4-sonnet] -model_id = "claude-sonnet-4-20250514" -max_tokens = 64000 -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 100 -costs = { input = 3.0, output = 15.0 } - -[claude-4-opus] -model_id = "claude-opus-4-20250514" -max_tokens = 32000 -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 100 -costs = { input = 3.0, output = 15.0 } - -# --- Claude 4.1 Series -------------------------------------------------------- -["claude-4.1-opus"] -model_id = "claude-opus-4-1-20250805" -max_tokens = 32000 -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 100 -costs = { input = 3.0, output = 15.0 } - -# --- Claude 4.5 Series -------------------------------------------------------- -["claude-4.5-sonnet"] -model_id = "claude-sonnet-4-5-20250929" -max_tokens = 64000 -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 100 -costs = { input = 3.0, output = 15.0 } - -["claude-4.5-haiku"] -model_id = "claude-haiku-4-5-20251001" -max_tokens = 64000 -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 100 -costs = { input = 1.0, output = 5.0 } - -["claude-4.5-opus"] -model_id = "claude-opus-4-5-20251101" -max_tokens = 64000 -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 100 -costs = { input = 5.0, output = 25.0 } diff --git a/.pipelex/inference/backends/azure_openai.toml b/.pipelex/inference/backends/azure_openai.toml deleted file mode 100644 index 8a89898..0000000 --- a/.pipelex/inference/backends/azure_openai.toml +++ /dev/null @@ -1,213 +0,0 @@ -################################################################################ -# Azure OpenAI Backend Configuration -################################################################################ -# -# This file defines the model specifications for Azure OpenAI models. 
-# It contains model definitions for OpenAI models deployed on Azure -# accessible through the Azure OpenAI API. -# -# Configuration structure: -# - Each model is defined in its own section with the model name as the header -# - Headers with dots must be quoted (e.g., ["gpt-4.1"]) -# - Model costs are in USD per million tokens (input/output) -# -# Documentation: https://docs.pipelex.com -# Support: https://go.pipelex.com/discord -# -################################################################################ - -################################################################################ -# MODEL DEFAULTS -################################################################################ - -[defaults] -model_type = "llm" -sdk = "azure_openai_responses" -prompting_target = "openai" -structure_method = "instructor/openai_responses_tools" - -################################################################################ -# LANGUAGE MODELS -################################################################################ - -# --- GPT-4o Series ------------------------------------------------------------ -[gpt-4o] -model_id = "gpt-4o-2024-11-20" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 2.5, output = 10.0 } - -[gpt-4o-mini] -model_id = "gpt-4o-mini-2024-07-18" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.15, output = 0.6 } - -# --- GPT-4.1 Series ----------------------------------------------------------- -["gpt-4.1"] -model_id = "gpt-4.1-2025-04-14" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 2, output = 8 } - -["gpt-4.1-mini"] -model_id = "gpt-4.1-mini-2025-04-14" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.4, output = 1.6 } - -["gpt-4.1-nano"] -model_id = "gpt-4.1-nano-2025-04-14" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.1, output = 0.4 } - -# --- o Series ---------------------------------------------------------------- -[o1-mini] -model_id = "o1-mini-2024-09-12" -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 3.0, output = 12.0 } -valued_constraints = { fixed_temperature = 1 } - -[o1] -model_id = "o1-2024-12-17" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 15.0, output = 60.0 } -valued_constraints = { fixed_temperature = 1 } - -[o3-mini] -model_id = "o3-mini-2025-01-31" -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 1.1, output = 4.4 } -valued_constraints = { fixed_temperature = 1 } - -[o3] -model_id = "o3-2025-04-16" -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 2, output = 8 } -valued_constraints = { fixed_temperature = 1 } - -# --- GPT-5 Series ------------------------------------------------------------- -[gpt-5-mini] -model_id = "gpt-5-mini-2025-08-07" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.25, output = 2.0 } -valued_constraints = { fixed_temperature = 1 } - -[gpt-5-nano] -model_id = "gpt-5-nano-2025-08-07" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.05, output = 0.4 } -valued_constraints = { fixed_temperature = 1 } - -[gpt-5-chat] -model_id = "gpt-5-chat-2025-08-07" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 1.25, output = 10.0 } -valued_constraints = { fixed_temperature = 1 } - -[gpt-5] -model_id = "gpt-5-2025-08-07" -inputs = ["text", "images"] 
-outputs = ["text", "structured"] -costs = { input = 1.25, output = 10.0 } -valued_constraints = { fixed_temperature = 1 } - -# --- GPT-5.1 Series ------------------------------------------------------------- -["gpt-5.1"] -model_id = "gpt-5.1-2025-11-13" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 1.25, output = 10.0 } -valued_constraints = { fixed_temperature = 1 } - -["gpt-5.1-chat"] -model_id = "gpt-5.1-chat-2025-11-13" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 1.25, output = 10.0 } -valued_constraints = { fixed_temperature = 1 } - -["gpt-5.1-codex"] -model_id = "gpt-5.1-codex-2025-11-13" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 1.25, output = 10.0 } -valued_constraints = { fixed_temperature = 1 } - -# --- GPT-5.2 Series ------------------------------------------------------------- -["gpt-5.2"] -model_id = "gpt-5.2-2025-12-11" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 1.75, output = 14.0 } - -["gpt-5.2-chat"] -model_id = "gpt-5.2-chat-2025-12-11" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 1.25, output = 10.0 } -valued_constraints = { fixed_temperature = 1 } - -################################################################################ -# IMAGE GENERATION MODELS -################################################################################ - -# --- OpenAI Image Generation -------------------------------------------------- -[gpt-image-1] -sdk = "azure_rest_img_gen" -model_type = "img_gen" -model_id = "gpt-image-1-2025-04-15" -inputs = ["text"] -outputs = ["image"] -costs = { input = 10, output = 40 } - -[gpt-image-1.rules] -prompt = "positive_only" -num_images = "gpt" -aspect_ratio = "gpt" -background = "gpt" -inference = "gpt" -safety_checker = "unavailable" -output_format = "gpt" - -[gpt-image-1-mini] -sdk = "azure_rest_img_gen" -model_type = "img_gen" -model_id = "gpt-image-1-mini-2025-10-06" -inputs = ["text"] -outputs = ["image"] -costs = { input = 2.5, output = 8 } - -[gpt-image-1-mini.rules] -prompt = "positive_only" -num_images = "gpt" -aspect_ratio = "gpt" -background = "gpt" -inference = "gpt" -safety_checker = "unavailable" -output_format = "gpt" - -["gpt-image-1.5"] -sdk = "azure_rest_img_gen" -model_type = "img_gen" -model_id = "gpt-image-1.5-2025-12-16" -inputs = ["text"] -outputs = ["image"] -costs = { input = 8, output = 32 } - -["gpt-image-1.5".rules] -prompt = "positive_only" -num_images = "gpt" -aspect_ratio = "gpt" -background = "gpt" -inference = "gpt" -safety_checker = "unavailable" -output_format = "gpt" diff --git a/.pipelex/inference/backends/bedrock.toml b/.pipelex/inference/backends/bedrock.toml deleted file mode 100644 index c4ab176..0000000 --- a/.pipelex/inference/backends/bedrock.toml +++ /dev/null @@ -1,120 +0,0 @@ -################################################################################ -# Amazon Bedrock Backend Configuration -################################################################################ -# -# This file defines the model specifications for Amazon Bedrock models. -# It contains model definitions for various language models -# accessible through the Amazon Bedrock service. 
-# -# Configuration structure: -# - Each model is defined in its own section with the model name as the header -# - Headers with dots must be quoted (e.g., ["claude-3.5-sonnet"]) -# - Model costs are in USD per million tokens (input/output) -# -# Documentation: https://docs.pipelex.com -# Support: https://go.pipelex.com/discord -# -################################################################################ - -################################################################################ -# MODEL DEFAULTS -################################################################################ - -[defaults] -model_type = "llm" -sdk = "bedrock_aioboto3" -prompting_target = "anthropic" - -################################################################################ -# LANGUAGE MODELS -################################################################################ - -# --- Mistral Models ----------------------------------------------------------- -[bedrock-mistral-large] -model_id = "mistral.mistral-large-2407-v1:0" -max_tokens = 8192 -inputs = ["text"] -outputs = ["text"] -costs = { input = 4.0, output = 12.0 } - -# --- Meta Llama Models -------------------------------------------------------- -[bedrock-meta-llama-3-3-70b-instruct] -model_id = "us.meta.llama3-3-70b-instruct-v1:0" -max_tokens = 8192 -inputs = ["text"] -outputs = ["text"] -# TODO: find out the actual cost per million tokens for llama3 on bedrock -costs = { input = 3.0, output = 15.0 } - -# --- Amazon Nova Models ------------------------------------------------------- -[bedrock-nova-pro] -model_id = "us.amazon.nova-pro-v1:0" -max_tokens = 5120 -inputs = ["text"] -outputs = ["text"] -# TODO: find out the actual cost per million tokens for nova on bedrock -costs = { input = 3.0, output = 15.0 } - -# --- Claude LLMs -------------------------------------------------------------- -["claude-3.7-sonnet"] -sdk = "bedrock_anthropic" -model_id = "us.anthropic.claude-3-7-sonnet-20250219-v1:0" -max_tokens = 8192 -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 100 -costs = { input = 3.0, output = 15.0 } - -[claude-4-sonnet] -sdk = "bedrock_anthropic" -model_id = "us.anthropic.claude-sonnet-4-20250514-v1:0" -max_tokens = 64000 -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 100 -costs = { input = 3.0, output = 15.0 } - -[claude-4-opus] -sdk = "bedrock_anthropic" -model_id = "us.anthropic.claude-opus-4-20250514-v1:0" -max_tokens = 32000 -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 100 -costs = { input = 3.0, output = 15.0 } - -["claude-4.1-opus"] -sdk = "bedrock_anthropic" -model_id = "us.anthropic.claude-opus-4-1-20250805-v1:0" -max_tokens = 32000 -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 100 -costs = { input = 3.0, output = 15.0 } - -["claude-4.5-sonnet"] -sdk = "bedrock_anthropic" -model_id = "us.anthropic.claude-sonnet-4-5-20250929-v1:0" -max_tokens = 8192 -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 100 -costs = { input = 3.0, output = 15.0 } - -["claude-4.5-haiku"] -sdk = "bedrock_anthropic" -model_id = "us.anthropic.claude-haiku-4-5-20251001-v1:0" -max_tokens = 8192 -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 100 -costs = { input = 1.0, output = 5.0 } - -["claude-4.5-opus"] -sdk = "bedrock_anthropic" -model_id = "global.anthropic.claude-opus-4-5-20251101-v1:0" -max_tokens = 8192 -inputs = ["text", 
"images"] -outputs = ["text", "structured"] -max_prompt_images = 100 -costs = { input = 5.0, output = 25.0 } diff --git a/.pipelex/inference/backends/blackboxai.toml b/.pipelex/inference/backends/blackboxai.toml deleted file mode 100644 index 9ee0433..0000000 --- a/.pipelex/inference/backends/blackboxai.toml +++ /dev/null @@ -1,240 +0,0 @@ -################################################################################ -# BlackBoxAI Backend Configuration -################################################################################ -# -# This file defines the model specifications for BlackBoxAI models. -# It contains model definitions for various language models from different providers -# accessible through the BlackBoxAI API. -# -# Configuration structure: -# - Each model is defined in its own section with the model name as the header -# - Headers with dots must be quoted (e.g., ["gpt-4.5-preview"]) -# - Model costs are in USD per million tokens (input/output) -# -# Documentation: https://docs.pipelex.com -# Support: https://go.pipelex.com/discord -# -################################################################################ - -################################################################################ -# MODEL DEFAULTS -################################################################################ - -[defaults] -model_type = "llm" -sdk = "openai" -structure_method = "instructor/openai_tools" - -################################################################################ -# LANGUAGE MODELS -################################################################################ - -# --- OpenAI Models ------------------------------------------------------------ -[gpt-4o-mini] -model_id = "blackboxai/openai/gpt-4o-mini" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.15, output = 0.60 } - -[gpt-4o] -model_id = "blackboxai/openai/gpt-4o" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 2.50, output = 10.00 } - -[o1-mini] -model_id = "blackboxai/openai/o1-mini" -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 1.10, output = 4.40 } - -[o4-mini] -model_id = "blackboxai/openai/o4-mini" -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 1.10, output = 4.40 } - -# --- Claude LLMs -------------------------------------------------------------- -["claude-3.5-haiku"] -model_id = "blackboxai/anthropic/claude-3.5-haiku" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.80, output = 4.00 } - -["claude-3.5-sonnet"] -model_id = "blackboxai/anthropic/claude-3.5-sonnet" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 3.00, output = 15.00 } - -["claude-3.7-sonnet"] -model_id = "blackboxai/anthropic/claude-3.7-sonnet" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 3.00, output = 15.00 } - -[claude-opus-4] -model_id = "blackboxai/anthropic/claude-opus-4" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 15.00, output = 75.00 } - -[claude-4-sonnet] -model_id = "blackboxai/anthropic/claude-sonnet-4" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 3.00, output = 15.00 } - -["claude-4.5-sonnet"] -model_id = "blackboxai/anthropic/claude-sonnet-4.5" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.28, output = 1.10 } - -# --- Google Models 
------------------------------------------------------------ -["gemini-2.5-flash"] -model_id = "blackboxai/google/gemini-2.5-flash" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.30, output = 2.50 } - -["gemini-2.5-pro"] -model_id = "blackboxai/google/gemini-2.5-pro" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 1.25, output = 10.00 } - -["gemini-flash-1.5-8b"] -model_id = "blackboxai/google/gemini-flash-1.5-8b" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.04, output = 0.15 } - -# --- Mistral Models ----------------------------------------------------------- -[mistral-large] -model_id = "blackboxai/mistralai/mistral-large" -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 2.00, output = 6.00 } - -[pixtral-large-2411] -model_id = "blackboxai/mistralai/pixtral-large-2411" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 2.00, output = 6.00 } - -# --- Meta Llama Models -------------------------------------------------------- -["llama-3.3-70b-instruct"] -model_id = "blackboxai/meta-llama/llama-3.3-70b-instruct" -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 0.04, output = 0.12 } - -["llama-3.2-11b-vision-instruct"] -model_id = "blackboxai/meta-llama/llama-3.2-11b-vision-instruct" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.05, output = 0.05 } - -# --- Qwen Models -------------------------------------------------------------- -["qwen-2.5-72b-instruct"] -model_id = "blackboxai/qwen/qwen-2.5-72b-instruct" -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 0.12, output = 0.39 } - -["qwen2.5-vl-72b-instruct"] -model_id = "blackboxai/qwen/qwen2.5-vl-72b-instruct" -inputs = ["text", "images"] -outputs = ["text"] -costs = { input = 0.25, output = 0.75 } - -# --- Amazon Nova Models ------------------------------------------------------- -[nova-micro-v1] -model_id = "blackboxai/amazon/nova-micro-v1" -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 0.04, output = 0.14 } - -[nova-lite-v1] -model_id = "blackboxai/amazon/nova-lite-v1" -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 0.06, output = 0.24 } - -################################################################################ -# FREE MODELS -################################################################################ - -# --- DeepSeek Free Models ----------------------------------------------------- -[deepseek-chat] -model_id = "blackboxai/deepseek/deepseek-chat:free" -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 0.00, output = 0.00 } - -[deepseek-r1] -model_id = "blackboxai/deepseek/deepseek-r1:free" -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 0.00, output = 0.00 } - -# --- Meta Llama Free Models --------------------------------------------------- -["llama-3.3-70b-instruct-free"] -model_id = "blackboxai/meta-llama/llama-3.3-70b-instruct:free" -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 0.00, output = 0.00 } - - -################################################################################ -# IMAGE GENERATION MODELS -################################################################################ - -[flux-pro] -model_type = "img_gen" -sdk = "blackboxai_img_gen" -model_id = "blackboxai/black-forest-labs/flux-pro" -inputs = ["text"] -outputs = ["image"] -costs = { 
input = 0.0, output = 0.04 } - -["flux-pro/v1.1"] -model_type = "img_gen" -sdk = "blackboxai_img_gen" -model_id = "blackboxai/black-forest-labs/flux-1.1-pro" -inputs = ["text"] -outputs = ["image"] -costs = { input = 0.0, output = 0.04 } - -["flux-pro/v1.1-ultra"] -model_type = "img_gen" -sdk = "blackboxai_img_gen" -model_id = "blackboxai/black-forest-labs/flux-1.1-pro-ultra" -inputs = ["text"] -outputs = ["image"] -costs = { input = 0.0, output = 0.06 } - -[fast-lightning-sdxl] -model_type = "img_gen" -sdk = "blackboxai_img_gen" -model_id = "blackboxai/bytedance/sdxl-lightning-4step" -inputs = ["text"] -outputs = ["image"] -costs = { input = 0.0, output = 0.0014 } - -[nano-banana] -model_type = "img_gen" -sdk = "blackboxai_img_gen" -model_id = "blackboxai/google/nano-banana" -inputs = ["text"] -outputs = ["image"] -costs = { input = 0.0, output = 0.039 } - -[nano-banana-pro] -model_type = "img_gen" -sdk = "blackboxai_img_gen" -model_id = "blackboxai/google/nano-banana-pro" -inputs = ["text"] -outputs = ["image"] -costs = { input = 0.0, output = 0.039 } diff --git a/.pipelex/inference/backends/fal.toml b/.pipelex/inference/backends/fal.toml deleted file mode 100644 index 3433f99..0000000 --- a/.pipelex/inference/backends/fal.toml +++ /dev/null @@ -1,107 +0,0 @@ -################################################################################ -# FAL Backend Configuration -################################################################################ -# -# This file defines the model specifications for FAL (Fast AI Labs) models. -# It contains model definitions for various image generation models -# accessible through the FAL API. -# -# Configuration structure: -# - Each model is defined in its own section with the model name as the header -# - Headers with dots must be quoted (e.g., ["flux-pro/v1.1"]) -# - Model costs are in USD per million tokens (input/output) -# -# Documentation: https://docs.pipelex.com -# Support: https://go.pipelex.com/discord -# -################################################################################ - -################################################################################ -# MODEL DEFAULTS -################################################################################ - -[defaults] -model_type = "img_gen" -sdk = "fal" -prompting_target = "fal" - -################################################################################ -# IMAGE GENERATION MODELS -################################################################################ - -# --- Flux Pro Series ---------------------------------------------------------- -[flux-pro] -model_id = "fal-ai/flux-pro" -inputs = ["text"] -outputs = ["image"] -costs = { input = 0.05, output = 0.0 } - -[flux-pro.rules] -prompt = "positive_only" -num_images = "fal" -aspect_ratio = "flux" -inference = "flux" -safety_checker = "available" -output_format = "flux_1" -specific = "fal" - -["flux-pro/v1.1"] -model_id = "fal-ai/flux-pro/v1.1" -inputs = ["text"] -outputs = ["image"] -costs = { input = 0.05, output = 0.0 } - -["flux-pro/v1.1".rules] -prompt = "positive_only" -num_images = "fal" -aspect_ratio = "flux" -inference = "flux" -safety_checker = "available" -output_format = "flux_1" -specific = "fal" - -["flux-pro/v1.1-ultra"] -model_id = "fal-ai/flux-pro/v1.1-ultra" -inputs = ["text"] -outputs = ["image"] -costs = { input = 0.06, output = 0.0 } - -["flux-pro/v1.1-ultra".rules] -prompt = "positive_only" -num_images = "fal" -aspect_ratio = "flux_11_ultra" -inference = "flux_11_ultra" -safety_checker = 
"available" -output_format = "flux_1" -specific = "fal" - -[flux-2] -model_id = "fal-ai/flux-2" -inputs = ["text"] -outputs = ["image"] -costs = { input = 0.05, output = 0.0 } - -[flux-2.rules] -prompt = "positive_only" -num_images = "fal" -aspect_ratio = "flux" -inference = "flux" -safety_checker = "available" -output_format = "flux_2" -specific = "fal" - -# --- SDXL models -------------------------------------------------------------- -[fast-lightning-sdxl] -model_id = "fal-ai/fast-lightning-sdxl" -inputs = ["text"] -outputs = ["image"] -costs = { input = 0.0003, output = 0.0 } - -[fast-lightning-sdxl.rules] -prompt = "positive_only" -num_images = "fal" -aspect_ratio = "flux" -inference = "sdxl_lightning" -safety_checker = "unavailable" -output_format = "sdxl" -specific = "fal" diff --git a/.pipelex/inference/backends/google.toml b/.pipelex/inference/backends/google.toml deleted file mode 100644 index 36e19c2..0000000 --- a/.pipelex/inference/backends/google.toml +++ /dev/null @@ -1,94 +0,0 @@ -################################################################################ -# Google Gemini API Backend Configuration -################################################################################ -# -# This file defines the model specifications for Google Gemini API models. -# It contains model definitions for Gemini language models -# accessible through the Google Gemini API (not VertexAI). -# -# Configuration structure: -# - Each model is defined in its own section with the model name as the header -# - Headers with dots must be quoted (e.g., ["gemini-2.0-flash"]) -# - Model costs are in USD per million tokens (input/output) -# -# Documentation: https://docs.pipelex.com -# Support: https://go.pipelex.com/discord -# -################################################################################ - -################################################################################ -# MODEL DEFAULTS -################################################################################ - -[defaults] -model_type = "llm" -sdk = "google" -prompting_target = "gemini" -structure_method = "instructor/genai_tools" - -################################################################################ -# LANGUAGE MODELS -################################################################################ - -# --- Gemini 2.0 Series ---------------------------------------- -["gemini-2.0-flash"] -model_id = "gemini-2.0-flash" -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 3000 -costs = { input = 0.10, output = 0.40 } - -# --- Gemini 2.5 Series ---------------------------------------- -["gemini-2.5-pro"] -model_id = "gemini-2.5-pro" -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 3000 -costs = { input = 1.25, output = 10.0 } - -["gemini-2.5-flash"] -model_id = "gemini-2.5-flash" -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 3000 -costs = { input = 0.30, output = 2.50 } - -["gemini-2.5-flash-lite"] -model_id = "gemini-2.5-flash-lite" -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 3000 -costs = { input = 0.10, output = 0.40 } - -# --- Gemini 3.0 Series ---------------------------------------- -["gemini-3.0-pro"] -model_id = "gemini-3-pro-preview" -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 3000 -costs = { input = 2, output = 12.0 } - -["gemini-3.0-flash-preview"] -model_id = "gemini-3-flash-preview" -inputs = ["text", "images"] 
-outputs = ["text", "structured"] -max_prompt_images = 3000 -costs = { input = 0.5, output = 3.0 } - -################################################################################ -# IMAGE GENERATION MODELS (Nano Banana) -################################################################################ - -[nano-banana] -model_type = "img_gen" -model_id = "gemini-2.5-flash-image" -inputs = ["text"] -outputs = ["image"] -costs = { input = 0.0, output = 0.039 } - -[nano-banana-pro] -model_type = "img_gen" -model_id = "gemini-3-pro-image-preview" -inputs = ["text"] -outputs = ["image"] -costs = { input = 0.0, output = 0.039 } diff --git a/.pipelex/inference/backends/groq.toml b/.pipelex/inference/backends/groq.toml deleted file mode 100644 index 72bdae3..0000000 --- a/.pipelex/inference/backends/groq.toml +++ /dev/null @@ -1,129 +0,0 @@ -################################################################################ -# Groq Backend Configuration -################################################################################ -# -# This file defines the model specifications for Groq models. -# It contains model definitions for various LLM models accessible through -# the Groq API, including text-only and vision-capable models. -# -# Configuration structure: -# - Each model is defined in its own section with the model name as the header -# - Headers with dots or slashes must be quoted (e.g., ["meta-llama/llama-4-scout"]) -# - Model costs are in USD per million tokens (input/output) -# - Vision models support max 5 images per request, 33MP max resolution -# -# Documentation: https://docs.pipelex.com -# Support: https://go.pipelex.com/discord -# -################################################################################ - -################################################################################ -# MODEL DEFAULTS -################################################################################ - -[defaults] -model_type = "llm" -sdk = "openai" -structure_method = "instructor/json" - -################################################################################ -# PRODUCTION TEXT MODELS -################################################################################ - -# --- Meta Llama 3.x Series ---------------------------------------------------- -["llama-3.1-8b-instant"] -model_id = "llama-3.1-8b-instant" -max_tokens = 131072 -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 0.05, output = 0.08 } - -["llama-3.3-70b-versatile"] -model_id = "llama-3.3-70b-versatile" -max_tokens = 32768 -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 0.59, output = 0.79 } - -# --- Meta Llama Guard --------------------------------------------------------- -[llama-guard-4-12b] -model_id = "meta-llama/llama-guard-4-12b" -max_tokens = 1024 -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 0.20, output = 0.20 } - -# --- OpenAI GPT-OSS Models ---------------------------------------------------- -[gpt-oss-20b] -model_id = "openai/gpt-oss-20b" -max_tokens = 65536 -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 0.075, output = 0.30 } - -[gpt-oss-120b] -model_id = "openai/gpt-oss-120b" -max_tokens = 65536 -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 0.15, output = 0.60 } - -# --- Groq Compound Systems ---------------------------------------------------- -["groq/compound"] -model_id = "groq/compound" -max_tokens = 8192 -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 
0.15, output = 0.45 } - -["groq/compound-mini"] -model_id = "groq/compound-mini" -max_tokens = 8192 -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 0.10, output = 0.30 } - -################################################################################ -# PREVIEW MODELS -################################################################################ - -# --- Meta Llama 4 Vision Models (Preview) ------------------------------------- -[llama-4-scout-17b-16e-instruct] -model_id = "meta-llama/llama-4-scout-17b-16e-instruct" -max_tokens = 8192 -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 5 -costs = { input = 0.11, output = 0.34 } - -[llama-4-maverick-17b-128e-instruct] -model_id = "meta-llama/llama-4-maverick-17b-128e-instruct" -max_tokens = 8192 -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 5 -costs = { input = 0.20, output = 0.60 } - -# --- Moonshot Kimi K2 --------------------------------------------------------- -[kimi-k2-instruct-0905] -model_id = "moonshotai/kimi-k2-instruct-0905" -max_tokens = 16384 -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 1.00, output = 3.00 } - -# --- OpenAI Safety Model ------------------------------------------------------ -[gpt-oss-safeguard-20b] -model_id = "openai/gpt-oss-safeguard-20b" -max_tokens = 65536 -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 0.075, output = 0.30 } - -# --- Qwen 3 ------------------------------------------------------------------- -[qwen3-32b] -model_id = "qwen/qwen3-32b" -max_tokens = 40960 -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 0.29, output = 0.59 } diff --git a/.pipelex/inference/backends/huggingface.toml b/.pipelex/inference/backends/huggingface.toml deleted file mode 100644 index 1a79638..0000000 --- a/.pipelex/inference/backends/huggingface.toml +++ /dev/null @@ -1,43 +0,0 @@ -################################################################################ -# Hugging Face Backend Configuration -################################################################################ -# -# This file defines the model specifications for Hugging Face models. -# It contains model definitions for various image generation models -# accessible through the Hugging Face Inference API with provider="auto". 
-# -# Configuration structure: -# - Each model is defined in its own section with the model name as the header -# - Headers with dots or slashes must be quoted (e.g., ["stabilityai/stable-diffusion-2-1"]) -# - Model costs are in USD per million tokens (input/output) -# -# Documentation: https://docs.pipelex.com -# Support: https://go.pipelex.com/discord -# -################################################################################ - -################################################################################ -# MODEL DEFAULTS -################################################################################ - -[defaults] -model_type = "img_gen" -sdk = "huggingface_img_gen" - -################################################################################ -# IMAGE GENERATION MODELS -################################################################################ - -# --- Qwen Image Models -------------------------------------------------- -[qwen-image] -model_id = "Qwen/Qwen-Image" -inputs = ["text"] -outputs = ["image"] -costs = { input = 0.0, output = 0.0 } -variant = "fal-ai" -# variant = "replicate" - -[qwen-image.rules] -prompt = "with_negative" -aspect_ratio = "qwen_image" -inference = "qwen_image" diff --git a/.pipelex/inference/backends/internal.toml b/.pipelex/inference/backends/internal.toml deleted file mode 100644 index e44b222..0000000 --- a/.pipelex/inference/backends/internal.toml +++ /dev/null @@ -1,37 +0,0 @@ -################################################################################ -# Internal Backend Configuration -################################################################################ -# -# This file defines the model specifications for internal software-only models. -# These models run internally without external APIs or AI services. -# -# Configuration structure: -# - Each model is defined in its own section with the model name as the header -# - Model costs are in USD per million tokens (input/output) -# -# Documentation: https://docs.pipelex.com -# Support: https://go.pipelex.com/discord -# -################################################################################ - -################################################################################ -# TEXT EXTRACTION MODELS -################################################################################ - -# --- PyPDFium2 Text Extractor ------------------------------------------------- -[pypdfium2-extract-pdf] -model_type = "text_extractor" -sdk = "pypdfium2" -model_id = "extract-text" -inputs = ["pdf"] -outputs = ["pages"] -costs = {} - -# --- Docling Text Extractor --------------------------------------------------- -[docling-extract-text] -model_type = "text_extractor" -sdk = "docling_sdk" -model_id = "extract-text" -inputs = ["pdf", "image"] -outputs = ["pages"] -costs = {} diff --git a/.pipelex/inference/backends/mistral.toml b/.pipelex/inference/backends/mistral.toml deleted file mode 100644 index b695ac7..0000000 --- a/.pipelex/inference/backends/mistral.toml +++ /dev/null @@ -1,164 +0,0 @@ -################################################################################ -# Mistral Backend Configuration -################################################################################ -# -# This file defines the model specifications for Mistral AI models. -# It contains model definitions for various Mistral language models and specialized models -# accessible through the Mistral API. 
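For readers skimming these backend files, the "Configuration structure" notes repeated in each header all describe the same TOML shape. As a minimal sketch (the model name and prices here are made up for illustration): [example-model] model_id = "provider/example-model" inputs = ["text"] outputs = ["text", "structured"] costs = { input = 1.0, output = 3.0 }  # USD per million tokens ... and, because a dot (or slash) in the name requires quoting the header: ["example-model-1.5"] model_id = "provider/example-model-1.5" inputs = ["text"] outputs = ["text"] costs = { input = 0.5, output = 1.5 }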
-# -# Configuration structure: -# - Each model is defined in its own section with the model name as the header -# - Headers with dots must be quoted (e.g., ["ministral-3b"]) -# - Model costs are in USD per million tokens (input/output) -# -# Documentation: https://docs.pipelex.com -# Support: https://go.pipelex.com/discord -# -################################################################################ - -################################################################################ -# MODEL DEFAULTS -################################################################################ - -[defaults] -model_type = "llm" -sdk = "mistral" -prompting_target = "mistral" -structure_method = "instructor/mistral_tools" - -################################################################################ -# LANGUAGE MODELS -################################################################################ - -# --- Ministral Series --------------------------------------------------------- -[ministral-3b] -model_id = "ministral-3b-latest" -max_tokens = 131072 -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 0.04, output = 0.04 } - -[ministral-8b] -model_id = "ministral-8b-latest" -max_tokens = 131072 -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 0.1, output = 0.1 } - -# --- Mistral 7B Series -------------------------------------------------------- -[mistral-7b-2312] -model_id = "open-mistral-7b" -max_tokens = 32768 -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 0.25, output = 0.25 } - -# --- Mistral 8x7B Series ------------------------------------------------------ -[mistral-8x7b-2312] -model_id = "open-mixtral-8x7b" -max_tokens = 32768 -inputs = ["text"] -outputs = ["text"] -costs = { input = 0.7, output = 0.7 } - -# --- Mistral Codestral Series ------------------------------------------------- -[mistral-codestral-2405] -model_id = "codestral-2405" -max_tokens = 262144 -inputs = ["text"] -outputs = ["text"] -costs = { input = 1.0, output = 3.0 } - -# --- Mistral Large Series ----------------------------------------------------- -[mistral-large-2402] -model_id = "mistral-large-2402" -max_tokens = 32768 -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 4.0, output = 12.0 } - -[mistral-large] -model_id = "mistral-large-latest" -max_tokens = 131072 -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 4.0, output = 12.0 } - -# --- Mistral Small Series ----------------------------------------------------- -[mistral-small-2402] -model_id = "mistral-small-2402" -max_tokens = 32768 -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 1.0, output = 3.0 } - -[mistral-small] -model_id = "mistral-small-latest" -max_tokens = 32768 -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 1.0, output = 3.0 } - -# --- Pixtral Series ----------------------------------------------------------- -[pixtral-12b] -model_id = "pixtral-12b-latest" -max_tokens = 131072 -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.15, output = 0.15 } - -[pixtral-large] -model_id = "pixtral-large-latest" -max_tokens = 131072 -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 2.0, output = 6.0 } - -# --- Mistral Medium Series ---------------------------------------------------- -[mistral-medium] -model_id = "mistral-medium-latest" -max_tokens = 128000 -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input
= 0.4, output = 2.0 } - -[mistral-medium-2508] -model_id = "mistral-medium-2508" -max_tokens = 128000 -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.4, output = 2.0 } - -################################################################################ -# EXTRACTION MODELS -################################################################################ - -# TODO: add support for per-page pricing - -[mistral-ocr-2503] -model_type = "text_extractor" -model_id = "mistral-ocr-2503" -max_tokens = 16384 -inputs = ["pdf", "image"] -outputs = ["pages"] - -[mistral-ocr-2505] -model_type = "text_extractor" -model_id = "mistral-ocr-2505" -max_tokens = 16384 -inputs = ["pdf", "image"] -outputs = ["pages"] - -[mistral-ocr-2512] -model_type = "text_extractor" -model_id = "mistral-ocr-2512" -max_tokens = 16384 -inputs = ["pdf", "image"] -outputs = ["pages"] - -[mistral-ocr] -model_type = "text_extractor" -model_id = "mistral-ocr-latest" -max_tokens = 16384 -inputs = ["pdf", "image"] -outputs = ["pages"] diff --git a/.pipelex/inference/backends/ollama.toml b/.pipelex/inference/backends/ollama.toml deleted file mode 100644 index 397e9ac..0000000 --- a/.pipelex/inference/backends/ollama.toml +++ /dev/null @@ -1,63 +0,0 @@ -################################################################################ -# Ollama Backend Configuration -################################################################################ -# -# This file defines the model specifications for Ollama models. -# It contains model definitions for local language models -# accessible through the Ollama API. -# -# Configuration structure: -# - Each model is defined in its own section with the model name as the header -# - Headers with dots must be quoted (e.g., ["mistral-small3.1-24b"]) -# - Model costs are in USD per million tokens (input/output) -# -# Documentation: https://docs.pipelex.com -# Support: https://go.pipelex.com/discord -# -################################################################################ - -################################################################################ -# MODEL DEFAULTS -################################################################################ - -[defaults] -model_type = "llm" -sdk = "openai" -prompting_target = "anthropic" -structure_method = "instructor/openai_tools" - -################################################################################ -# LANGUAGE MODELS -################################################################################ - -# --- Gemma Models ------------------------------------------------------------- -[gemma3-4b] -model_id = "gemma3:4b" -inputs = ["text"] -outputs = ["text"] -max_prompt_images = 3000 -costs = { input = 0, output = 0 } - -# --- Llama Models ------------------------------------------------------------- -[llama4-scout] -model_id = "llama4:scout" -inputs = ["text"] -outputs = ["text"] -max_prompt_images = 3000 -costs = { input = 0, output = 0 } - -# --- Mistral Models ----------------------------------------------------------- -["mistral-small3.1-24b"] -model_id = "mistral-small3.1:24b" -inputs = ["text"] -outputs = ["text"] -max_prompt_images = 3000 -costs = { input = 0, output = 0 } - -# --- Qwen Models -------------------------------------------------------------- -[qwen3-8b] -model_id = "qwen3:8b" -inputs = ["text"] -outputs = ["text"] -costs = { input = 0, output = 0 } -# TODO: support tokens diff --git a/.pipelex/inference/backends/openai.toml b/.pipelex/inference/backends/openai.toml deleted file
mode 100644 index e61d52e..0000000 --- a/.pipelex/inference/backends/openai.toml +++ /dev/null @@ -1,208 +0,0 @@ -################################################################################ -# OpenAI Backend Configuration -################################################################################ -# -# This file defines the model specifications for OpenAI models. -# It contains model definitions for various LLM and image generation models -# accessible through the OpenAI API. -# -# Configuration structure: -# - Each model is defined in its own section with the model name as the header -# - Headers with dots must be quoted (e.g., ["gpt-4.1"]) -# - Model costs are in USD per million tokens (input/output) -# -# Documentation: https://docs.pipelex.com -# Support: https://go.pipelex.com/discord -# -################################################################################ - -################################################################################ -# MODEL DEFAULTS -################################################################################ - -[defaults] -model_type = "llm" -sdk = "openai_responses" -prompting_target = "openai" -structure_method = "instructor/openai_responses_tools" - -################################################################################ -# LANGUAGE MODELS -################################################################################ - -# --- GPT-3.5 Series ----------------------------------------------------------- -["gpt-3.5-turbo"] -model_id = "gpt-3.5-turbo-1106" -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 0.5, output = 1.5 } - -# --- GPT-4 Series ------------------------------------------------------------- -[gpt-4] -inputs = ["text"] -outputs = ["text"] -costs = { input = 30.0, output = 60.0 } - -[gpt-4-turbo] -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 10.0, output = 30.0 } - -# --- GPT-4o Series ------------------------------------------------------------ -[gpt-4o-2024-11-20] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 2.5, output = 10.0 } - -[gpt-4o] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 2.5, output = 10.0 } - -[gpt-4o-mini-2024-07-18] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.15, output = 0.6 } - -[gpt-4o-mini] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.15, output = 0.6 } - -# --- GPT-4.1 Series ----------------------------------------------------------- -["gpt-4.1"] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 2, output = 8 } - -["gpt-4.1-mini"] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.4, output = 1.6 } - -["gpt-4.1-nano"] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.1, output = 0.4 } - -# --- o Series ---------------------------------------------------------------- -[o1] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 15.0, output = 60.0 } -valued_constraints = { fixed_temperature = 1 } - -[o3-mini] -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 1.1, output = 4.4 } -valued_constraints = { fixed_temperature = 1 } - -[o3] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 10.0, output = 40.0 } -valued_constraints = { fixed_temperature = 1 } - -[o4-mini] -inputs = ["text"] -outputs = ["text", 
"structured"] -costs = { input = 1.1, output = 4.4 } -valued_constraints = { fixed_temperature = 1 } - -# --- GPT-5 Series ------------------------------------------------------------- -[gpt-5] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 1.25, output = 10.0 } -valued_constraints = { fixed_temperature = 1 } - -[gpt-5-mini] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.25, output = 2.0 } -valued_constraints = { fixed_temperature = 1 } - -[gpt-5-nano] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.05, output = 0.4 } -valued_constraints = { fixed_temperature = 1 } - -[gpt-5-chat] -model_id = "gpt-5-chat-latest" -inputs = ["text", "images"] -outputs = ["text"] -costs = { input = 1.25, output = 10.0 } -valued_constraints = { fixed_temperature = 1 } - -[gpt-5-codex] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 1.25, output = 10.0 } -valued_constraints = { fixed_temperature = 1 } - -# --- GPT-5.1 Series ------------------------------------------------------------- -["gpt-5.1"] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 1.25, output = 10.0 } - -["gpt-5.1-chat"] -model_id = "gpt-5.1-chat-latest" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 1.25, output = 10.0 } -valued_constraints = { fixed_temperature = 1 } - -["gpt-5.1-codex"] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 1.25, output = 10.0 } -valued_constraints = { fixed_temperature = 1 } - -["gpt-5.1-codex-max"] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 1.25, output = 10.0 } -valued_constraints = { fixed_temperature = 1 } - -# --- GPT-5.2 Series ------------------------------------------------------------- -["gpt-5.2"] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 1.75, output = 14.0 } - -["gpt-5.2-chat"] -model_id = "gpt-5.2-chat-latest" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 1.75, output = 14.0 } -valued_constraints = { fixed_temperature = 1 } - -################################################################################ -# IMAGE GENERATION MODELS -################################################################################ - -# --- OpenAI Image Generation -------------------------------------------------- -[gpt-image-1] -sdk = "openai_img_gen" -model_type = "img_gen" -inputs = ["text"] -outputs = ["image"] -costs = { input = 10, output = 40 } - -[gpt-image-1-mini] -sdk = "openai_img_gen" -model_type = "img_gen" -inputs = ["text"] -outputs = ["image"] -costs = { input = 2.5, output = 8 } - -["gpt-image-1.5"] -sdk = "openai_img_gen" -model_type = "img_gen" -model_id = "gpt-image-1.5" -inputs = ["text"] -outputs = ["image"] -costs = { input = 8, output = 32 } diff --git a/.pipelex/inference/backends/pipelex_gateway.toml b/.pipelex/inference/backends/pipelex_gateway.toml deleted file mode 100644 index bca075b..0000000 --- a/.pipelex/inference/backends/pipelex_gateway.toml +++ /dev/null @@ -1,41 +0,0 @@ -################################################################################ -# Pipelex Gateway Local Overrides -################################################################################ -# -# TELEMETRY NOTICE: -# -# Using Pipelex Gateway enables identified telemetry tied to your API key -# (hashed for security). 
This is independent from your telemetry.toml settings. -# -# We collect only technical data (model names, token counts, latency, error rates). -# We do NOT collect prompts, completions, pipe codes, or business data. -# -# This allows us to monitor service quality, enforce fair usage, and support you. -# -################################################################################ -# -# WARNING: USE AT YOUR OWN RISK! -# -# The actual model configuration is fetched remotely from Pipelex servers. -# Any override in this file may cause unexpected behavior or failures, -# as the remote configuration may change at any time. -# -# If you must override, you may ONLY use these keys per model: -# - sdk -# - structure_method -# -# All other keys will be ignored. -# -# If you need custom configurations, consider using your own API keys -# with direct provider backends (openai, anthropic, etc.) instead. -# -# Documentation: -# https://docs.pipelex.com/home/7-configuration/config-technical/inference-backend-config/ -# Support: https://go.pipelex.com/discord -# -################################################################################ - -# Per-model overrides example: -# [gpt-4o] -# sdk = "gateway_completions" -# structure_method = "instructor/openai_tools" diff --git a/.pipelex/inference/backends/pipelex_inference.toml b/.pipelex/inference/backends/pipelex_inference.toml deleted file mode 100644 index 751c570..0000000 --- a/.pipelex/inference/backends/pipelex_inference.toml +++ /dev/null @@ -1,205 +0,0 @@ -################################################################################ -# Pipelex Inference Backend Configuration -################################################################################ -# -# This file defines the model specifications for the Pipelex Inference backend. -# It contains model definitions for various LLM and image generation models -# accessible through the Pipelex unified inference API. 
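A pattern worth noting in this file: keys set in the [defaults] table (sdk, prompting_target, structure_method) appear to apply to every model unless a model section sets its own value, as the per-model sdk lines below suggest. A minimal sketch of that pattern, with a hypothetical model name: [defaults] model_type = "llm" sdk = "openai"  # the file-wide default ... [example-model] model_id = "pipelex/example-model" sdk = "openai_responses"  # overrides the default sdk for this model only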
-# -# Configuration structure: -# - Each model is defined in its own section with the model name as the header -# - Headers with dots must be quoted (e.g., ["gpt-4.1"]) -# - Model costs are in USD per million tokens (input/output) -# -# Documentation: https://docs.pipelex.com -# Support: https://go.pipelex.com/discord -# -################################################################################ - -################################################################################ -# MODEL DEFAULTS -################################################################################ - -[defaults] -model_type = "llm" -sdk = "openai" -prompting_target = "anthropic" -structure_method = "instructor/openai_tools" - -################################################################################ -# LANGUAGE MODELS -################################################################################ - -# --- OpenAI LLMs -------------------------------------------------------------- -[gpt-4o] -model_id = "pipelex/gpt-4o" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 2.75, output = 11.00 } -sdk = "openai_responses" -structure_method = "instructor/openai_responses_tools" - -[gpt-4o-mini] -model_id = "pipelex/gpt-4o-mini" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.17, output = 0.66 } -sdk = "openai_responses" -structure_method = "instructor/openai_responses_tools" - -["gpt-4.1"] -model_id = "pipelex/gpt-4.1" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 2, output = 8 } -sdk = "openai_responses" -structure_method = "instructor/openai_responses_tools" - -["gpt-4.1-mini"] -model_id = "pipelex/gpt-4.1-mini" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.4, output = 1.6 } -sdk = "openai_responses" -structure_method = "instructor/openai_responses_tools" - -["gpt-4.1-nano"] -model_id = "pipelex/gpt-4.1-nano" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.1, output = 0.4 } -sdk = "openai_responses" -structure_method = "instructor/openai_responses_tools" - -[gpt-5-nano] -model_id = "pipelex/gpt-5-nano" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.05, output = 0.40 } -sdk = "openai_responses" -structure_method = "instructor/openai_responses_tools" - -[gpt-5-mini] -model_id = "pipelex/gpt-5-mini" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.25, output = 2.00 } -sdk = "openai_responses" -structure_method = "instructor/openai_responses_tools" - -[gpt-5-chat] -model_id = "pipelex/gpt-5-chat" -inputs = ["text", "images"] -outputs = ["text"] -costs = { input = 1.25, output = 10.00 } -sdk = "openai_responses" -structure_method = "instructor/openai_responses_tools" - -[gpt-5] -model_id = "pipelex/gpt-5" -inputs = ["text", "images"] -outputs = ["text"] -costs = { input = 1.25, output = 10.00 } -sdk = "openai_responses" -structure_method = "instructor/openai_responses_tools" - -["gpt-5.1"] -model_id = "pipelex/gpt-5.1" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 1.25, output = 10.00 } -sdk = "openai_responses" -structure_method = "instructor/openai_responses_tools" - -["gpt-5.1-chat"] -model_id = "pipelex/gpt-5.1-chat" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 1.25, output = 10.00 } -sdk = "openai_responses" -structure_method = "instructor/openai_responses_tools" - -# --- Claude LLMs 
-------------------------------------------------------------- -["claude-4-sonnet"] -model_id = "pipelex/claude-4-sonnet" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 3, output = 15 } - -["claude-4.1-opus"] -model_id = "pipelex/claude-4.1-opus" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 15, output = 75 } - -["claude-4.5-sonnet"] -model_id = "pipelex/claude-4.5-sonnet" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 3, output = 15 } - -["claude-4.5-haiku"] -model_id = "pipelex/claude-4.5-haiku" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 1, output = 5 } - -["claude-4.5-opus"] -model_id = "pipelex/claude-4.5-opus" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 5, output = 25 } - -# --- Gemini LLMs -------------------------------------------------------------- -["gemini-2.0-flash"] -model_id = "pipelex/gemini-2.0-flash" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.10, output = 0.40 } - -["gemini-2.5-pro"] -model_id = "pipelex/gemini-2.5-pro" -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 3000 -costs = { input = 1.25, output = 10.0 } - -["gemini-2.5-flash"] -model_id = "pipelex/gemini-2.5-flash" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.30, output = 2.50 } - -["gemini-2.5-flash-lite"] -model_id = "pipelex/gemini-2.5-flash-lite" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.10, output = 0.40 } - -["gemini-3.0-pro"] -model_id = "pipelex/gemini-3.0-pro" -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 3000 -costs = { input = 2, output = 12.0 } - -# --- XAI LLMs -------------------------------------------------------------- - -[grok-3] -model_id = "grok-3" -inputs = ["text"] -outputs = ["text"] -costs = { input = 3, output = 15 } - -[grok-3-mini] -model_id = "grok-3-mini" -inputs = ["text"] -outputs = ["text"] -costs = { input = 0.3, output = 0.5 } - -################################################################################ -# OCR and IMAGE GENERATION MODELS -################################################################################ - -# We are still working on giving you access to OCR and image generation models -# and to the best models from Mistral through the Pipelex Inference backend. diff --git a/.pipelex/inference/backends/portkey.toml b/.pipelex/inference/backends/portkey.toml deleted file mode 100644 index 75e2574..0000000 --- a/.pipelex/inference/backends/portkey.toml +++ /dev/null @@ -1,263 +0,0 @@ -################################################################################ -# Portkey Configuration -################################################################################ -# -# This file defines the model specifications for the Portkey backend. -# It contains model definitions for various AI models.
-# -# Configuration structure: -# - Each model is defined in its own section with the model name as the header -# - Headers with dots must be quoted (e.g., ["gpt-4.1"]) -# - Model costs are in USD per million tokens (input/output) -# -# Documentation: https://docs.pipelex.com -# Support: https://go.pipelex.com/discord -# -################################################################################ - -################################################################################ -# MODEL DEFAULTS -################################################################################ - -[defaults] -model_type = "llm" -sdk = "portkey_completions" -structure_method = "instructor/openai_tools" -prompting_target = "anthropic" - -################################################################################ -# LANGUAGE MODELS -################################################################################ - -# --- OpenAI LLMs -------------------------------------------------------------- -[gpt-4o-mini] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.15, output = 0.6 } -sdk = "portkey_responses" -structure_method = "instructor/openai_responses_tools" -x-portkey-provider = "@openai" - -[gpt-4o] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 2.5, output = 10.0 } -sdk = "portkey_responses" -structure_method = "instructor/openai_responses_tools" -x-portkey-provider = "@openai" - -["gpt-4.1-nano"] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.1, output = 0.4 } -sdk = "portkey_responses" -structure_method = "instructor/openai_responses_tools" -x-portkey-provider = "@openai" - -["gpt-4.1-mini"] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.4, output = 1.6 } -sdk = "portkey_responses" -structure_method = "instructor/openai_responses_tools" -x-portkey-provider = "@openai" - -["gpt-4.1"] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 2, output = 8 } -sdk = "portkey_responses" -structure_method = "instructor/openai_responses_tools" -x-portkey-provider = "@openai" - -[o1] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 15.0, output = 60.0 } -valued_constraints = { fixed_temperature = 1 } -sdk = "portkey_responses" -structure_method = "instructor/openai_responses_tools" -x-portkey-provider = "@openai" - -[o3-mini] -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 1.1, output = 4.4 } -valued_constraints = { fixed_temperature = 1 } -sdk = "portkey_responses" -structure_method = "instructor/openai_responses_tools" -x-portkey-provider = "@openai" - -[o3] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 2, output = 8 } -valued_constraints = { fixed_temperature = 1 } -sdk = "portkey_responses" -structure_method = "instructor/openai_responses_tools" -x-portkey-provider = "@openai" - -[o4-mini] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 1.1, output = 4.4 } -valued_constraints = { fixed_temperature = 1 } -sdk = "portkey_responses" -structure_method = "instructor/openai_responses_tools" -x-portkey-provider = "@openai" - -[gpt-5-nano] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.05, output = 0.4 } -valued_constraints = { fixed_temperature = 1 } -sdk = "portkey_responses" -structure_method = "instructor/openai_responses_tools" -x-portkey-provider = "@openai" - -[gpt-5-mini] 
-inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.25, output = 2.0 } -valued_constraints = { fixed_temperature = 1 } -sdk = "portkey_responses" -structure_method = "instructor/openai_responses_tools" -x-portkey-provider = "@openai" - -[gpt-5] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 1.25, output = 10.0 } -valued_constraints = { fixed_temperature = 1 } -sdk = "portkey_responses" -structure_method = "instructor/openai_responses_tools" -x-portkey-provider = "@openai" - -["gpt-5.1"] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 1.25, output = 10.0 } -valued_constraints = { fixed_temperature = 1 } -sdk = "portkey_responses" -structure_method = "instructor/openai_responses_tools" -x-portkey-provider = "@openai" - -["gpt-5.1-codex"] -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 1.25, output = 10.0 } -valued_constraints = { fixed_temperature = 1 } -sdk = "portkey_responses" -structure_method = "instructor/openai_responses_tools" -x-portkey-provider = "@openai" - -# --- Claude LLMs -------------------------------------------------------------- -[claude-3-haiku] -model_id = "claude-3-haiku-20240307" -max_tokens = 4096 -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 100 -costs = { input = 0.25, output = 1.25 } -x-portkey-provider = "@anthropic" - -["claude-3.7-sonnet"] -model_id = "claude-3-7-sonnet-20250219" -max_tokens = 8192 -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 100 -costs = { input = 3.0, output = 15.0 } -x-portkey-provider = "@anthropic" - -[claude-4-sonnet] -model_id = "claude-sonnet-4-20250514" -max_tokens = 64000 -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 100 -costs = { input = 3.0, output = 15.0 } -x-portkey-provider = "@anthropic" - -[claude-4-opus] -model_id = "claude-opus-4-20250514" -max_tokens = 32000 -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 100 -costs = { input = 3.0, output = 15.0 } -x-portkey-provider = "@anthropic" - -["claude-4.1-opus"] -model_id = "claude-opus-4-1-20250805" -max_tokens = 32000 -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 100 -costs = { input = 3.0, output = 15.0 } -x-portkey-provider = "@anthropic" - -["claude-4.5-sonnet"] -model_id = "claude-sonnet-4-5-20250929" -max_tokens = 64000 -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 100 -costs = { input = 3.0, output = 15.0 } -x-portkey-provider = "@anthropic" - -["claude-4.5-haiku"] -model_id = "claude-haiku-4-5-20251001" -max_tokens = 64000 -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 100 -costs = { input = 1.0, output = 5.0 } -x-portkey-provider = "@anthropic" - -["claude-4.5-opus"] -model_id = "claude-opus-4-5-20251101" -max_tokens = 64000 -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 100 -costs = { input = 5.0, output = 25.0 } -x-portkey-provider = "@anthropic" - -# --- Gemini LLMs -------------------------------------------------------------- -["gemini-2.0-flash"] -model_id = "gemini-2.0-flash" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.10, output = 0.40 } -x-portkey-provider = "@google" - -["gemini-2.5-pro"] -model_id = "gemini-2.5-pro" -inputs = ["text", "images"] -outputs = ["text", "structured"] 
-max_prompt_images = 3000 -costs = { input = 1.25, output = 10.0 } -x-portkey-provider = "@google" - -["gemini-2.5-flash"] -model_id = "gemini-2.5-flash" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.30, output = 2.50 } -x-portkey-provider = "@google" - -["gemini-2.5-flash-lite"] -model_id = "gemini-2.5-flash-lite" -inputs = ["text", "images"] -outputs = ["text", "structured"] -costs = { input = 0.10, output = 0.40 } -x-portkey-provider = "@google" - -["gemini-3.0-pro"] -model_id = "gemini-3-pro-preview" -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 3000 -costs = { input = 2, output = 12.0 } -x-portkey-provider = "@google" diff --git a/.pipelex/inference/backends/scaleway.toml b/.pipelex/inference/backends/scaleway.toml deleted file mode 100644 index 20fe792..0000000 --- a/.pipelex/inference/backends/scaleway.toml +++ /dev/null @@ -1,67 +0,0 @@ -################################################################################ -# Scaleway Backend Configuration -################################################################################ -# -# This file defines the model specifications for Scaleway models. -# It contains model definitions for various LLM models accessible through -# the Scaleway API, including text-only and vision-capable models. -# -# Configuration structure: -# - Each model is defined in its own section with the model name as the header -# - Headers with dots or slashes must be quoted (e.g., ["meta-llama/llama-4-scout"]) -# - Model costs are in USD per million tokens (input/output) -# - Vision models support max 5 images per request, 33MP max resolution -# -# Documentation: https://docs.pipelex.com -# Support: https://go.pipelex.com/discord -# -################################################################################ - -################################################################################ -# MODEL DEFAULTS -################################################################################ - -[defaults] -model_type = "llm" -sdk = "openai" -structure_method = "instructor/json" - -# --- DeepSeek Models ---------------------------------------------------------- -[deepseek-r1-distill-llama-70b] -max_tokens = 32768 -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 0.90, output = 0.90 } - -# --- Meta Llama 3.x Series ---------------------------------------------------- -["llama-3.1-8b-instruct"] -max_tokens = 131072 -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 0.2, output = 0.2 } - -["llama-3.3-70b-instruct"] -max_tokens = 32768 -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 0.90, output = 0.90 } - -# --- OpenAI GPT-OSS Models ---------------------------------------------------- -[gpt-oss-120b] -max_tokens = 65536 -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 0.15, output = 0.60 } - -# --- Qwen 3 ------------------------------------------------------------------- -[qwen3-235b-a22b-instruct-2507] -max_tokens = 40960 -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 0.75, output = 2.25 } - -[qwen3-coder-30b-a3b-instruct] -max_tokens = 40960 -inputs = ["text"] -outputs = ["text", "structured"] -costs = { input = 0.20, output = 0.80 } diff --git a/.pipelex/inference/backends/vertexai.toml b/.pipelex/inference/backends/vertexai.toml deleted file mode 100644 index 1ebab79..0000000 --- a/.pipelex/inference/backends/vertexai.toml +++ /dev/null @@ -1,54 +0,0 @@
-################################################################################ -# VertexAI Backend Configuration -################################################################################ -# -# This file defines the model specifications for Google VertexAI models. -# It contains model definitions for Gemini language models -# accessible through the Google VertexAI API. -# -# Configuration structure: -# - Each model is defined in its own section with the model name as the header -# - Headers with dots must be quoted (e.g., ["gemini-2.0-flash"]) -# - Model costs are in USD per million tokens (input/output) -# -# Documentation: https://docs.pipelex.com -# Support: https://go.pipelex.com/discord -# -################################################################################ - -################################################################################ -# MODEL DEFAULTS -################################################################################ - -[defaults] -model_type = "llm" -sdk = "openai" -prompting_target = "gemini" -structure_method = "instructor/vertexai_tools" - -################################################################################ -# LANGUAGE MODELS -################################################################################ - -# --- Gemini 2.0 Series -------------------------------------------------------- -["gemini-2.0-flash"] -model_id = "google/gemini-2.0-flash" -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 3000 -costs = { input = 0.1, output = 0.4 } - -# --- Gemini 2.5 Series -------------------------------------------------------- -["gemini-2.5-pro"] -model_id = "google/gemini-2.5-pro" -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 3000 -costs = { input = 1.25, output = 10.0 } - -["gemini-2.5-flash"] -model_id = "google/gemini-2.5-flash" -inputs = ["text", "images"] -outputs = ["text", "structured"] -max_prompt_images = 3000 -costs = { input = 0.30, output = 2.50 } diff --git a/.pipelex/inference/backends/xai.toml b/.pipelex/inference/backends/xai.toml deleted file mode 100644 index 3045344..0000000 --- a/.pipelex/inference/backends/xai.toml +++ /dev/null @@ -1,56 +0,0 @@ -################################################################################ -# XAI Backend Configuration -################################################################################ -# -# This file defines the model specifications for XAI models. -# It contains model definitions for Grok language models -# accessible through the XAI API.
-# -# Configuration structure: -# - Each model is defined in its own section with the model name as the header -# - Headers with dots must be quoted (e.g., ["grok-3"]) -# - Model costs are in USD per million tokens (input/output) -# -# Documentation: https://docs.pipelex.com -# Support: https://go.pipelex.com/discord -# -################################################################################ - -################################################################################ -# MODEL DEFAULTS -################################################################################ - -[defaults] -model_type = "llm" -sdk = "openai" -prompting_target = "anthropic" -structure_method = "instructor/openai_tools" - -################################################################################ -# LANGUAGE MODELS -################################################################################ - -# --- Grok 3 Series ------------------------------------------------------------ -[grok-3] -model_id = "grok-3" -inputs = ["text"] -outputs = ["text"] -costs = { input = 3, output = 15 } - -[grok-3-mini] -model_id = "grok-3-mini" -inputs = ["text"] -outputs = ["text"] -costs = { input = 0.3, output = 0.5 } - -[grok-3-fast] -model_id = "grok-3-fast-latest" -inputs = ["text"] -outputs = ["text"] -costs = { input = 5, output = 25 } - -[grok-3-mini-fast] -model_id = "grok-3-mini-fast-latest" -inputs = ["text"] -outputs = ["text"] -costs = { input = 0.15, output = 4 } diff --git a/.pipelex/inference/deck/base_deck.toml b/.pipelex/inference/deck/base_deck.toml deleted file mode 100644 index 63ab39e..0000000 --- a/.pipelex/inference/deck/base_deck.toml +++ /dev/null @@ -1,202 +0,0 @@ -#################################################################################################### -# Pipelex Model Deck - Base Configuration -#################################################################################################### -# -# This file defines model aliases and presets for: -# - LLMs (language models for text generation and structured output) -# - Image generation models (for creating images from text prompts) -# - Document extraction models (OCR and text extraction from PDFs/images) -# -# Documentation: https://docs.pipelex.com -# Support: https://go.pipelex.com/discord -# -#################################################################################################### - -#################################################################################################### -# Aliases -#################################################################################################### - -[aliases] -base-claude = "claude-4.5-sonnet" -base-gpt = "gpt-4o" -base-gemini = "gemini-2.5-flash" -base-mistral = "mistral-medium" -base-groq = "llama-3.3-70b-versatile" -base-grok = "grok-4-fast-non-reasoning" - -best-gpt = "gpt-5.1" -best-claude = "claude-4.5-opus" -best-gemini = "gemini-3.0-pro" -best-mistral = "mistral-medium" - -# Groq-specific aliases -fast-groq = "llama-3.1-8b-instant" -vision-groq = "llama-4-scout-17b-16e-instruct" - -# Image generation aliases -base-img-gen = "flux-pro/v1.1" -best-img-gen = "flux-2" -fast-img-gen = "fast-lightning-sdxl" - -#################################################################################################### -# Waterfalls -#################################################################################################### - -[waterfalls] - -# --- Waterfalls for LLMs --------------------------------------------------------------------- -smart_llm = [ - 
"claude-4.5-opus", - "claude-4.5-sonnet", - "gemini-3.0-pro", - "gpt-5.1", - "claude-4.1-opus", - "gemini-2.5-pro", - "claude-4-sonnet", - "grok-4-fast-non-reasoning", -] -smart_llm_with_vision = [ - "claude-4.5-opus", - "claude-4.5-sonnet", - "gemini-3.0-pro", - "gpt-5.1", - "claude-4.1-opus", - "gemini-2.5-pro", - "claude-4-sonnet", - "grok-4-fast-non-reasoning", -] -smart_llm_for_structured = [ - "claude-4.5-opus", - "claude-4.5-sonnet", - "gemini-3.0-pro", - "gpt-5.1", - "claude-4.1-opus", - "claude-4-sonnet", -] -llm_for_creativity = [ - "claude-4.5-opus", - "claude-4.1-opus", - "gemini-2.5-pro", - "gpt-5.1", -] -llm_for_large_codebase = [ - "gemini-2.5-pro", - "claude-4.5-sonnet", - "gemini-3.0-pro", - "gpt-5.1", - "gemini-2.5-flash", - "grok-4-fast-non-reasoning", -] -cheap_llm = [ - "gpt-4o-mini", - "gemini-2.5-flash-lite", - "mistral-small", - "claude-3-haiku", - "grok-3-mini", -] -cheap_llm_for_vision = [ - "gemini-2.5-flash-lite", - "gpt-4o-mini", - "claude-3-haiku", -] -cheap_llm_for_structured = ["gpt-4o-mini", "mistral-small", "claude-3-haiku"] -cheap_llm_for_creativity = [ - "gemini-2.5-flash", - "grok-3-mini", - "gpt-4o-mini", - "claude-4.5-haiku", -] - -# --- Waterfalls for Extracts --------------------------------------------------------------------- -pdf_text_extractor = [ - "azure-document-intelligence", - "mistral-ocr", - "pypdfium2-extract-pdf", -] -image_text_extractor = ["mistral-ocr"] - -#################################################################################################### -# LLM Presets -#################################################################################################### - -[llm.presets] - -# LLM Presets — Specific skills ------------------------------------------------------------- - -# Generation skills -llm_for_factual_writing = { model = "base-gpt", temperature = 0.1 } -llm_for_creative_writing = { model = "base-gpt", temperature = 0.9 } -llm_for_writing_cheap = { model = "gpt-4o-mini", temperature = 0.3 } - -# Retrieve and answer questions skills -llm_to_answer_questions_cheap = { model = "gpt-4o-mini", temperature = 0.3 } -llm_to_answer_questions = { model = "base-claude", temperature = 0.3 } -llm_to_retrieve = { model = "base-claude", temperature = 0.1 } - -# Engineering skills -llm_to_engineer = { model = "smart_llm_for_structured", temperature = 0.2 } -llm_to_code = { model = "base-claude", temperature = 0.1 } -llm_to_analyze_large_codebase = { model = "base-claude", temperature = 0.1 } - -# Vision skills -llm_for_img_to_text_cheap = { model = "gpt-4o-mini", temperature = 0.1 } -llm_for_img_to_text = { model = "base-claude", temperature = 0.1 } -llm_for_diagram_to_text = { model = "best-claude", temperature = 0.3 } -llm_for_table_to_text = { model = "base-claude", temperature = 0.3 } - -# Image generation prompting skills -llm_to_prompt_img_gen = { model = "base-claude", temperature = 0.2 } -llm_to_prompt_img_gen_cheap = { model = "gpt-4o-mini", temperature = 0.5 } - -# Groq-specific presets (fast inference, low cost) -llm_groq_fast_text = { model = "fast-groq", temperature = 0.7 } -llm_groq_balanced = { model = "base-groq", temperature = 0.5 } -llm_groq_vision = { model = "vision-groq", temperature = 0.3 } - -# LLM Presets — For Testing --------------------------------------------------------------------- - -llm_for_testing_gen_text = { model = "cheap_llm", temperature = 0.5 } -llm_for_testing_gen_object = { model = "cheap_llm_for_structured", temperature = 0.1 } -llm_for_testing_vision = { model = "cheap_llm_for_vision", 
temperature = 0.5 } -llm_for_testing_vision_structured = { model = "cheap_llm_for_vision", temperature = 0.5 } - -#################################################################################################### -# LLM Choices -#################################################################################################### - -[llm.choice_defaults] -for_text = "cheap_llm" -for_object = "cheap_llm_for_structured" - -#################################################################################################### -# Extract Presets -#################################################################################################### - -[extract] -choice_default = "extract_ocr_from_document" - -[extract.presets] -extract_ocr_from_document = { model = "azure-document-intelligence", max_nb_images = 100, image_min_size = 50 } -extract_basic_from_pdf = { model = "pypdfium2-extract-pdf", max_nb_images = 100, image_min_size = 50 } - -#################################################################################################### -# Image Generation Presets -#################################################################################################### - -[img_gen] -choice_default = "gen_image_basic" - -[img_gen.presets] - -# General purpose -gen_image_basic = { model = "base-img-gen", quality = "medium", guidance_scale = 7.5, is_moderated = true, safety_tolerance = 3 } -gen_image_fast = { model = "fast-img-gen", nb_steps = 4, guidance_scale = 5.0, is_moderated = true, safety_tolerance = 3 } -gen_image_high_quality = { model = "best-img-gen", quality = "high", guidance_scale = 8.0, is_moderated = true, safety_tolerance = 3 } -gen_image_openai_low_quality = { model = "gpt-image-1", quality = "low" } - -# Specific skills -img_gen_for_art = { model = "best-img-gen", quality = "high", guidance_scale = 9.0, is_moderated = false, safety_tolerance = 5 } -img_gen_for_diagram = { model = "base-img-gen", quality = "medium", guidance_scale = 7.0, is_moderated = true, safety_tolerance = 2 } -img_gen_for_mockup = { model = "base-img-gen", quality = "medium", guidance_scale = 6.5, is_moderated = true, safety_tolerance = 3 } -img_gen_for_product = { model = "best-img-gen", quality = "high", guidance_scale = 8.5, is_moderated = true, safety_tolerance = 2 } -img_gen_for_testing = { model = "fast-img-gen", nb_steps = 4, guidance_scale = 4.0, is_moderated = true, safety_tolerance = 4 } diff --git a/.pipelex/inference/deck/overrides.toml b/.pipelex/inference/deck/overrides.toml deleted file mode 100644 index 08814db..0000000 --- a/.pipelex/inference/deck/overrides.toml +++ /dev/null @@ -1,19 +0,0 @@ -#################################################################################################### -# Pipelex Model Deck - Overrides -#################################################################################################### -# -# This file allows you to override the default model choices defined in base_deck.toml. -# You can customize presets for LLMs, image generation, and document extraction models. 
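-#
-# For example, a hypothetical override of a single preset (keys assumed to
-# mirror the [llm.presets] section of base_deck.toml, which defines
-# llm_to_code = { model = "base-claude", temperature = 0.1 }):
-#
-#   [llm.presets]
-#   llm_to_code = { model = "best-claude", temperature = 0.1 }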
-# -# Documentation: https://docs.pipelex.com -# Support: https://go.pipelex.com/discord -# -#################################################################################################### - -#################################################################################################### -# LLM Deck overrides -#################################################################################################### - -[llm.choice_overrides] -for_text = "disabled" -for_object = "disabled" diff --git a/.pipelex/inference/routing_profiles.toml b/.pipelex/inference/routing_profiles.toml deleted file mode 100644 index bf40281..0000000 --- a/.pipelex/inference/routing_profiles.toml +++ /dev/null @@ -1,173 +0,0 @@ -# Routing profile library - Routes models to their backends -# ========================================================================================= -# This file controls which backend serves which model. -# Simply change the 'active' field to switch profiles, -# or you can add your own custom profiles. -# -# Documentation: https://docs.pipelex.com -# Support: https://go.pipelex.com/discord -# ========================================================================================= - -# Which profile to use (change this to switch routing) -active = "pipelex_gateway_first" - -# We recommend using the "pipelex_gateway_first" profile to get a head start with all models. -# To use the Pipelex Gateway backend: -# 1. Join our Discord community to get your free API key (no credit card required): -# Visit https://go.pipelex.com/discord and request your key in the appropriate channel -# 2. Set the environment variable (or add it to your .env file): -# - Linux/macOS: export PIPELEX_GATEWAY_API_KEY="your-api-key" -# - Windows CMD: set PIPELEX_GATEWAY_API_KEY=your-api-key -# - Windows PowerShell: $env:PIPELEX_GATEWAY_API_KEY="your-api-key" -# 3. The .pipelex/inference/backends.toml is already configured with api_key = "${PIPELEX_GATEWAY_API_KEY}" -# which will get the key from the environment variable. 
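-#
-# For reference, the wiring from step 3 looks roughly like this inside
-# .pipelex/inference/backends.toml (hypothetical sketch: the section name and
-# any surrounding keys are assumptions, only the api_key line is quoted above):
-#
-#   [pipelex_gateway]
-#   api_key = "${PIPELEX_GATEWAY_API_KEY}"  # resolved from the environment at load time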
- -# ========================================================================================= -# Routing Profiles -# ========================================================================================= - -[profiles.pipelex_gateway_first] -description = "Use Pipelex Gateway backend for all its supported models" -default = "pipelex_gateway" -fallback_order = [ - "pipelex_gateway", - "azure_openai", - "bedrock", - "google", - "blackboxai", - "mistral", - "fal", -] - -[profiles.pipelex_gateway_first.routes] -# Pattern matching: "model-pattern" = "backend-name" - -[profiles.pipelex_gateway_first.optional_routes] # Each optional route is considered only if its backend is available -"gpt-*" = "pipelex_gateway" -"gpt-image-1" = "openai" -"claude-*" = "pipelex_gateway" -"grok-*" = "pipelex_gateway" -"gemini-*" = "pipelex_gateway" -"*-sdxl" = "fal" -"flux-*" = "fal" -"mistral-ocr" = "mistral" - -[profiles.all_pipelex_gateway] -description = "Use Pipelex Gateway for all its supported models" -default = "pipelex_gateway" - -[profiles.all_anthropic] -description = "Use Anthropic backend for all its supported models" -default = "anthropic" - -[profiles.all_azure_openai] -description = "Use Azure OpenAI backend for all its supported models" -default = "azure_openai" - -[profiles.all_bedrock] -description = "Use Bedrock backend for all its supported models" -default = "bedrock" - -[profiles.all_blackboxai] -description = "Use BlackBoxAI backend for all its supported models" -default = "blackboxai" - -[profiles.all_fal] -description = "Use FAL backend for all its supported models" -default = "fal" - -[profiles.all_google] -description = "Use Google GenAI backend for all its supported models" -default = "google" - -[profiles.all_groq] -description = "Use groq backend for all its supported models" -default = "groq" - -[profiles.all_huggingface] -description = "Use HuggingFace backend for all its supported models" -default = "huggingface" - -[profiles.all_mistral] -description = "Use Mistral backend for all its supported models" -default = "mistral" - -[profiles.all_ollama] -description = "Use Ollama backend for all its supported models" -default = "ollama" - -[profiles.all_openai] -description = "Use OpenAI backend for all its supported models" -default = "openai" - -[profiles.all_portkey] -description = "Use Portkey backend for all its supported models" -default = "portkey" - -[profiles.all_scaleway] -description = "Use Scaleway backend for all its supported models" -default = "scaleway" - -[profiles.all_xai] -description = "Use xAI backend for all its supported models" -default = "xai" - -[profiles.all_internal] -description = "Use internal backend for all its supported models" -default = "internal" - -# ========================================================================================= -# Custom Profiles -# ========================================================================================= -# Add your own profiles below following the same pattern: -# -# [profiles.your_profile_name] -# description = "What this profile does" -# default = "backend-name" # Where to route models by default -# [profiles.your_profile_name.routes] -# "model-pattern" = "backend-name" # Specific routing rules -# -# Pattern matching supports: -# - Exact names: "gpt-4o-mini" -# - Wildcards: "claude-*" (matches all models starting with claude-) -# - Partial wildcards: "*-sonnet" (matches all sonnet variants) - -# ========================================================================================= -# Example of a 
custom routing profile with mostly pattern matching and one specific model
-# =========================================================================================
-[profiles.example_routing_using_patterns]
-description = "Example routing profile using patterns"
-default = "pipelex_gateway"
-
-[profiles.example_routing_using_patterns.routes]
-# Pattern matching: "model-pattern" = "backend-name"
-"gpt-*" = "azure_openai"
-"claude-*" = "bedrock"
-"gemini-*" = "google"
-"grok-*" = "xai"
-"*-sdxl" = "fal"
-"flux-*" = "fal"
-"gpt-image-1" = "openai"
-
-# =========================================================================================
-# Example of a custom routing profile with specific model matching
-# =========================================================================================
-
-[profiles.example_routing_using_specific_models]
-description = "Example routing profile using specific models"
-
-[profiles.example_routing_using_specific_models.routes]
-"gpt-5-nano" = "pipelex_gateway"
-"gpt-4o-mini" = "blackboxai"
-"gpt-5-mini" = "openai"
-"gpt-5-chat" = "azure_openai"
-
-"claude-4-sonnet" = "pipelex_gateway"
-"claude-3.7-sonnet" = "blackboxai"
-
-"gemini-2.5-flash-lite" = "pipelex_gateway"
-"gemini-2.5-flash" = "blackboxai"
-"gemini-2.5-pro" = "vertexai"
-
-"grok-3" = "pipelex_gateway"
-"grok-3-mini" = "xai"
diff --git a/.pipelex/pipelex.toml b/.pipelex/pipelex.toml
deleted file mode 100644
index 4a2ea38..0000000
--- a/.pipelex/pipelex.toml
+++ /dev/null
@@ -1,162 +0,0 @@
-####################################################################################################
-# Pipelex Configuration File
-####################################################################################################
-#
-# This configuration file is copied to client projects' .pipelex/ directory when running:
-# `pipelex init config`
-#
-# Purpose:
-# - This file allows you to override Pipelex's default settings for specific projects
-# - Feel free to modify any settings below to suit your needs
-# - You can add any configuration sections that exist in the main pipelex.toml
-#
-# Finding Available Settings:
-# - See the full default configuration in: pipelex/pipelex.toml (in the Pipelex package)
-# - See the configuration structure classes in: pipelex/config.py and pipelex/cogt/config_cogt.py
-#
-# Common customizations are suggested below, such as:
-# - Logging levels and behavior
-# - Excluded directories for scanning
-# - LLM prompt dumping for debugging
-# - Feature flags for tracking and reporting
-# - Observer and reporting output directories
-#
-# Documentation: https://docs.pipelex.com
-# Support: https://go.pipelex.com/discord
-#
-####################################################################################################
-
-[pipelex.pipeline_execution_config]
-# Uncomment to disable conversion of data URLs to pipelex-storage:// URIs
-# is_normalize_data_urls_to_storage = false
-
-[pipelex.pipeline_execution_config.graph_config.data_inclusion]
-# Include stuff data in graph outputs (enabled here):
-stuff_json_content = true
-stuff_text_content = true
-stuff_html_content = true
-error_stack_traces = true
-
-[pipelex.pipeline_execution_config.graph_config.graphs_inclusion]
-# Uncomment to customize which graph outputs are generated (all enabled by default):
-# graphspec_json = false
-# mermaidflow_mmd = false
-# mermaidflow_html = false
-# reactflow_viewspec = false
-# reactflow_html = false
-
-[pipelex.pipeline_execution_config.graph_config.reactflow_config]
-# Uncomment to
customize ReactFlow graph rendering: -# edge_type = "bezier" # Options: "bezier", "smoothstep", "step", "straight" -# nodesep = 50 # Horizontal spacing between nodes -# ranksep = 80 # Vertical spacing between ranks/levels -# initial_zoom = 1.0 # Initial zoom level (1.0 = 100%) -# pan_to_top = true # Pan to show top of graph on load - -[pipelex.storage_config] -# Storage method: "local" (default), "in_memory", "s3", or "gcp" -# method = "local" - -# Whether to fetch remote HTTP URLs and store them locally -# is_fetch_remote_content_enabled = true - -# --- Local Storage Configuration --- -# Uncomment to customize local storage settings: -[pipelex.storage_config.local] -# uri_format = "{primary_id}/{secondary_id}/{hash}.{extension}" -# local_storage_path = ".pipelex/storage" - -# --- AWS S3 Storage Configuration --- -# Uncomment to use S3 storage (requires boto3: `pip install pipelex[s3]`): -[pipelex.storage_config.s3] -# uri_format = "{primary_id}/{secondary_id}/{hash}.{extension}" -# bucket_name = "your-bucket-name" -# region = "us-east-1" -# signed_urls_lifespan_seconds = 3600 # Set to "disabled" for public URLs - -# --- Google Cloud Storage Configuration --- -# Uncomment to use GCP storage (requires google-cloud-storage: `pip install pipelex[gcp-storage]`): -[pipelex.storage_config.gcp] -# uri_format = "{primary_id}/{secondary_id}/{hash}.{extension}" -# bucket_name = "your-bucket-name" -# project_id = "your-project-id" -# signed_urls_lifespan_seconds = 3600 # Set to "disabled" for public URLs - -[pipelex.scan_config] -# Uncomment to customize the excluded directories for scanning -# excluded_dirs = [ -# ".venv", -# "venv", -# "env", -# ".env", -# "virtualenv", -# ".virtualenv", -# ".git", -# "__pycache__", -# ".pytest_cache", -# ".mypy_cache", -# ".ruff_cache", -# "node_modules", -# "results", -# ] - -[pipelex.builder_config] -# Uncomment to change where the generated pipelines are saved: -# default_output_dir = "." 
-# default_bundle_file_name = "bundle" -# default_directory_base_name = "pipeline" - -[pipelex.log_config] -# Uncomment to change the default log level: -# default_log_level = "INFO" - -# Uncomment to log to stderr instead of stdout -# console_log_target = "stderr" -# console_print_target = "stderr" - -[pipelex.log_config.package_log_levels] -# Uncomment to change the log level for specific packages: -# pipelex = "INFO" - -[pipelex.observer_config] -# Uncomment to change the directory where the observer will save its results: -# observer_dir = "results/observer" - -[pipelex.feature_config] -# WIP/Experimental feature flags: -# is_pipeline_tracking_enabled = false -# is_reporting_enabled = true - -[pipelex.reporting_config] -# Uncomment to customize the reporting configuration: -# is_log_costs_to_console = false -# is_generate_cost_report_file_enabled = false -# cost_report_dir_path = "reports" -# cost_report_base_name = "cost_report" -# cost_report_extension = "csv" -# cost_report_unit_scale = 1.0 - -[cogt] -[cogt.model_deck_config] -# Uncomment to disable model fallback: it will raise errors instead of using secondary model options: -# is_model_fallback_enabled = false -# Uncomment to change the reaction to missing presets: "raise" (default), "log" or "none" -# missing_presets_reaction = "raise" - -[cogt.tenacity_config] -# Uncomment to change those values as needed: -# max_retries = 50 # Maximum number of retry attempts before giving up -# wait_multiplier = 0.2 # Multiplier applied to the wait time between retries (in seconds) -# wait_max = 20 # Maximum wait time between retries (in seconds) -# wait_exp_base = 1.3 # Base for exponential backoff calculation - -[cogt.llm_config] -# Uncomment any of these to enable dumping the inputs or outputs of text-generation with an LLM: -# is_dump_text_prompts_enabled = true -# is_dump_response_text_enabled = true - -[cogt.llm_config.instructor_config] -# Uncomment any of these to enable dumping the kwargs, response or errors when generating structured content: -# is_dump_kwargs_enabled = true -# is_dump_response_enabled = true -# is_dump_error_enabled = true diff --git a/.pipelex/pipelex_service.toml b/.pipelex/pipelex_service.toml deleted file mode 100644 index afe39a2..0000000 --- a/.pipelex/pipelex_service.toml +++ /dev/null @@ -1,19 +0,0 @@ -#################################################################################################### -# Pipelex Service Configuration -#################################################################################################### -# -# This file stores settings related to Pipelex managed services. -# Currently used for Pipelex Gateway terms acceptance. -# -# Documentation: https://docs.pipelex.com -# Support: https://go.pipelex.com/discord -# -#################################################################################################### - -[agreement] -# Set to true after accepting Pipelex terms of service. -terms_accepted = true - -# Note: when using pipelex_gateway, telemetry is enabled to monitor service usage. -# We collect technical data (model, pipe type...) and quantitative data (token counts...) -# but NOT your content, pipe codes, or output class names. 
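As a side note on the `[cogt.tenacity_config]` block above: here is a minimal Python sketch of the wait schedule those defaults imply, assuming the usual tenacity-style formula `wait = wait_multiplier * wait_exp_base ** attempt` capped at `wait_max` (the exact mapping inside Pipelex is an assumption, not confirmed by this patch):

```python
# Hypothetical illustration of the retry wait schedule implied by the
# [cogt.tenacity_config] defaults above; the formula itself is an assumption.
wait_multiplier = 0.2
wait_exp_base = 1.3
wait_max = 20.0

for attempt in range(1, 11):
    wait = min(wait_multiplier * wait_exp_base**attempt, wait_max)
    print(f"retry {attempt}: wait {wait:.2f}s")
```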
diff --git a/.pipelex/telemetry.toml b/.pipelex/telemetry.toml deleted file mode 100644 index eb2c537..0000000 --- a/.pipelex/telemetry.toml +++ /dev/null @@ -1,92 +0,0 @@ -#################################################################################################### -# Custom Telemetry Configuration -#################################################################################################### -# -# This file controls YOUR custom telemetry settings for observability and analytics. -# Configure your own PostHog, Langfuse, or OTLP-compatible backends here. -# -# NOTE: When using Pipelex Gateway, identified telemetry is automatically enabled -# (tied to your Gateway API key, hashed for security). This allows us to monitor -# service quality, enforce fair usage, and provide you with better support. -# Gateway telemetry operates independently from your settings below - you can have both! -# -# To disable all telemetry, set the DO_NOT_TRACK=1 environment variable. -# -# Documentation: https://docs.pipelex.com -# Support: https://go.pipelex.com/discord -# -#################################################################################################### - -# ────────────────────────────────────────────────────────────────────────────── -# PostHog Configuration (Event tracking + AI span tracing) -# ────────────────────────────────────────────────────────────────────────────── - -[custom_posthog] -mode = "off" # Values: "off" | "anonymous" | "identified" -# user_id = "your_user_id" # Required when mode = "identified" -endpoint = "${POSTHOG_ENDPOINT}" # Default: https://us.i.posthog.com (or https://eu.i.posthog.com for EU) -api_key = "${POSTHOG_API_KEY}" # Get from PostHog Project Settings -geoip = true # Enable GeoIP lookup -debug = false # Enable PostHog debug mode -redact_properties = [ - "prompt", - "system_prompt", - "response", - "file_path", - "url", -] # Event properties to redact - -# AI span tracing to YOUR PostHog (does NOT affect Langfuse/OTLP - they receive full data) -[custom_posthog.tracing] -enabled = false # Send AI spans to your PostHog - -# Privacy controls for data sent to YOUR PostHog only -[custom_posthog.tracing.capture] -content = false # Capture prompt/completion content -# content_max_length = 1000 # Max length for captured content (omit for unlimited) -pipe_codes = false # Include pipe codes in span names/attributes -output_class_names = false # Include output class names in span names/attributes - -# ────────────────────────────────────────────────────────────────────────────── -# Portkey SDK Configuration -# ────────────────────────────────────────────────────────────────────────────── - -[custom_portkey] -force_debug_enabled = false -force_tracing_enabled = false - -# ────────────────────────────────────────────────────────────────────────────── -# Langfuse Integration -# Note: Langfuse receives FULL span data (no redaction) -# ────────────────────────────────────────────────────────────────────────────── - -[langfuse] -enabled = false -# endpoint = "https://cloud.langfuse.com" # Override for self-hosted Langfuse -# public_key = "${LANGFUSE_PUBLIC_KEY}" # Langfuse public key -# secret_key = "${LANGFUSE_SECRET_KEY}" # Langfuse secret key - -# ────────────────────────────────────────────────────────────────────────────── -# Additional OTLP Exporters (array for multiple) -# Note: OTLP exporters receive FULL span data (no redaction) -# ────────────────────────────────────────────────────────────────────────────── - -# [[otlp]] -# name = "my-collector" # 
Identifier for logging -# endpoint = "https://..." # OTLP endpoint URL -# headers = { Authorization = "Bearer ${OTLP_AUTH_TOKEN}" } # Headers for OTLP export - -# ────────────────────────────────────────────────────────────────────────────── -# Custom Telemetry Allowed Modes -# Controls which integration modes can use custom telemetry settings above. -# ────────────────────────────────────────────────────────────────────────────── - -[telemetry_allowed_modes] -ci = false # CI environments don't use custom telemetry -cli = true # CLI usage allows custom telemetry -docker = true # Docker deployments allow custom telemetry -fastapi = true # FastAPI integrations allow custom telemetry -mcp = true # MCP integrations allow custom telemetry -n8n = true # n8n integrations allow custom telemetry -pytest = false # Tests don't use custom telemetry -python = false # Direct Python SDK usage doesn't use custom telemetry by default diff --git a/.windsurfrules.md b/.windsurfrules.md index 6125caa..af4572e 100644 --- a/.windsurfrules.md +++ b/.windsurfrules.md @@ -23,10 +23,10 @@ A pipeline file has three main sections: #### Domain Statement ```plx -domain = "domain_name" +domain = "domain_code" description = "Description of the domain" # Optional ``` -Note: The domain name usually matches the plx filename for single-file domains. For multi-file domains, use the subdirectory name. +Note: The domain code usually matches the plx filename for single-file domains. For multi-file domains, use the subdirectory name. #### Concept Definitions @@ -62,7 +62,7 @@ For details on how to structure concepts with fields, see the "Structuring Model ### Pipe Base Definition ```plx -[pipe.your_pipe_name] +[pipe.your_pipe_code] type = "PipeLLM" description = "A description of what your pipe does" inputs = { input_1 = "ConceptName1", input_2 = "ConceptName2" } @@ -471,7 +471,7 @@ The PipeExtract operator is used to extract text and images from an image or a P [pipe.extract_info] type = "PipeExtract" description = "extract the information" -inputs = { document = "PDF" } # or { image = "Image" } if it's an image. This is the only input. +inputs = { document = "Document" } # or { image = "Image" } if it's an image. This is the only input. output = "Page" ``` @@ -480,7 +480,7 @@ Using Extract Model Settings: [pipe.extract_with_model] type = "PipeExtract" description = "Extract with specific model" -inputs = { document = "PDF" } +inputs = { document = "Document" } output = "Page" model = "base_extract_mistral" # Use predefined extract preset or model alias ``` @@ -588,15 +588,16 @@ $sales_rep.phone | $sales_rep.email """ ``` -#### Key Parameters +#### Key Parameters (Template Mode) -- `template`: Inline template string (mutually exclusive with template_name) +- `template`: Inline template string (mutually exclusive with template_name and construct) - `template_name`: Name of a predefined template (mutually exclusive with template) - `template_category`: Template type ("llm_prompt", "html", "markdown", "mermaid", etc.) 
- `templating_style`: Styling options for template rendering - `extra_context`: Additional context variables for template For more control, you can use a nested `template` section instead of the `template` field: + - `template.template`: The template string - `template.category`: Template type - `template.templating_style`: Styling options @@ -604,9 +605,143 @@ For more control, you can use a nested `template` section instead of the `templa #### Template Variables Use the same variable insertion rules as PipeLLM: + - `@variable` for block insertion (multi-line content) - `$variable` for inline insertion (short text) +#### Construct Mode (for StructuredContent Output) + +PipeCompose can also generate `StructuredContent` objects using the `construct` section. This mode composes field values from fixed values, variable references, templates, or nested structures. + +**When to use construct mode:** + +- You need to output a structured object (not just Text) +- You want to deterministically compose fields from existing data +- No LLM is needed - just data composition and templating + +##### Basic Construct Usage + +```plx +[concept.SalesSummary] +description = "A structured sales summary" + +[concept.SalesSummary.structure] +report_title = { type = "text", description = "Title of the report" } +customer_name = { type = "text", description = "Customer name" } +deal_value = { type = "number", description = "Deal value" } +summary_text = { type = "text", description = "Generated summary text" } + +[pipe.compose_summary] +type = "PipeCompose" +description = "Compose a sales summary from deal data" +inputs = { deal = "Deal" } +output = "SalesSummary" + +[pipe.compose_summary.construct] +report_title = "Monthly Sales Report" +customer_name = { from = "deal.customer_name" } +deal_value = { from = "deal.amount" } +summary_text = { template = "Deal worth $deal.amount with $deal.customer_name" } +``` + +##### Field Composition Methods + +There are four ways to define field values in a construct: + +**1. Fixed Value (literal)** + +Use a literal value directly: + +```plx +[pipe.compose_report.construct] +report_title = "Annual Report" +report_year = 2024 +is_draft = false +``` + +**2. Variable Reference (`from`)** + +Get a value from working memory using a dotted path: + +```plx +[pipe.compose_report.construct] +customer_name = { from = "deal.customer_name" } +total_amount = { from = "order.total" } +street_address = { from = "customer.address.street" } +``` + +**3. Template (`template`)** + +Render a Jinja2 template with variable substitution: + +```plx +[pipe.compose_report.construct] +invoice_number = { template = "INV-$order.id" } +summary = { template = "Deal worth $deal.amount with $deal.customer_name on {{ current_date }}" } +``` + +**4. 
Nested Construct** + +For nested structures, use a TOML subsection: + +```plx +[pipe.compose_invoice.construct] +invoice_number = { template = "INV-$order.id" } +total = { from = "order.total_amount" } + +[pipe.compose_invoice.construct.billing_address] +street = { from = "customer.address.street" } +city = { from = "customer.address.city" } +country = "France" +``` + +##### Complete Construct Example + +```plx +domain = "invoicing" + +[concept.Address] +description = "A postal address" + +[concept.Address.structure] +street = { type = "text", description = "Street address" } +city = { type = "text", description = "City name" } +country = { type = "text", description = "Country name" } + +[concept.Invoice] +description = "An invoice document" + +[concept.Invoice.structure] +invoice_number = { type = "text", description = "Invoice number" } +total = { type = "number", description = "Total amount" } + +[pipe.compose_invoice] +type = "PipeCompose" +description = "Compose an invoice from order and customer data" +inputs = { order = "Order", customer = "Customer" } +output = "Invoice" + +[pipe.compose_invoice.construct] +invoice_number = { template = "INV-$order.id" } +total = { from = "order.total_amount" } + +[pipe.compose_invoice.construct.billing_address] +street = { from = "customer.address.street" } +city = { from = "customer.address.city" } +country = "France" +``` + +##### Key Parameters (Construct Mode) + +- `construct`: Dictionary mapping field names to their composition rules +- Each field can be: + - A literal value (string, number, boolean) + - A dict with `from` key for variable reference + - A dict with `template` key for template rendering + - A nested dict for nested structures + +**Note:** You must use either `template` or `construct`, not both. They are mutually exclusive. + ### PipeImgGen operator The PipeImgGen operator is used to generate images using AI image generation models. @@ -952,13 +1087,13 @@ So here are a few concrete examples of calls to execute_pipeline with various wa }, ) -## Here we have a single input and it's a PDF. -## Because PDFContent is a native concept, we can use it directly as a value, +## Here we have a single input and it's a document. +## Because DocumentContent is a native concept, we can use it directly as a value, ## the system knows what content it corresponds to: pipe_output = await execute_pipeline( pipe_code="power_extractor_dpe", inputs={ - "document": PDFContent(url=pdf_url), + "document": DocumentContent(url=pdf_url), }, ) @@ -1081,82 +1216,4 @@ result_list = pipe_output.main_stuff_as_items(item_type=GanttChart) ``` --- - -## Rules to choose LLM models used in PipeLLMs. - -### LLM Configuration System - -In order to use it in a pipe, an LLM is referenced by its llm_handle (alias) and possibly by an llm_preset. -LLM configurations are managed through the new inference backend system with files located in `.pipelex/inference/`: - -- **Model Deck**: `.pipelex/inference/deck/base_deck.toml` and `.pipelex/inference/deck/overrides.toml` -- **Backends**: `.pipelex/inference/backends.toml` and `.pipelex/inference/backends/*.toml` -- **Routing**: `.pipelex/inference/routing_profiles.toml` - -### LLM Handles - -An llm_handle can be either: -1. **A direct model name** (like "gpt-4o-mini", "claude-3-sonnet") - automatically available for all models loaded by the inference backend system -2. 
**An alias** - user-defined shortcuts that map to model names, defined in the `[aliases]` section: - -```toml -[aliases] -base-claude = "claude-4.5-sonnet" -base-gpt = "gpt-5" -base-gemini = "gemini-2.5-flash" -base-mistral = "mistral-medium" -``` - -The system first looks for direct model names, then checks aliases if no direct match is found. The system handles model routing through backends automatically. - -### Using an LLM Handle in a PipeLLM - -Here is an example of using an llm_handle to specify which LLM to use in a PipeLLM: - -```plx -[pipe.hello_world] -type = "PipeLLM" -description = "Write text about Hello World." -output = "Text" -model = { model = "gpt-5", temperature = 0.9 } -prompt = """ -Write a haiku about Hello World. -""" -``` - -As you can see, to use the LLM, you must also indicate the temperature (float between 0 and 1) and max_tokens (either an int or the string "auto"). - -### LLM Presets - -Presets are meant to record the choice of an llm with its hyper parameters (temperature and max_tokens) if it's good for a particular task. LLM Presets are skill-oriented. - -Examples: -```toml -llm_to_engineer = { model = "base-claude", temperature = 1 } -llm_to_extract_invoice = { model = "claude-4.5-sonnet", temperature = 0.1, max_tokens = "auto" } -``` - -The interest is that these presets can be used to set the LLM choice in a PipeLLM, like this: - -```plx -[pipe.extract_invoice] -type = "PipeLLM" -description = "Extract invoice information from an invoice text transcript" -inputs = { invoice_text = "InvoiceText" } -output = "Invoice" -model = "llm_to_extract_invoice" -prompt = """ -Extract invoice information from this invoice: - -The category of this invoice is: $invoice_details.category. - -@invoice_text -""" -``` - -The setting here `model = "llm_to_extract_invoice"` works because "llm_to_extract_invoice" has been declared as an llm_preset in the deck. -You must not use an LLM preset in a PipeLLM that does not exist in the deck. If needed, you can add llm presets. - - -You can override the predefined llm presets by setting them in `.pipelex/inference/deck/overrides.toml`. diff --git a/AGENTS.md b/AGENTS.md index 6125caa..af4572e 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -23,10 +23,10 @@ A pipeline file has three main sections: #### Domain Statement ```plx -domain = "domain_name" +domain = "domain_code" description = "Description of the domain" # Optional ``` -Note: The domain name usually matches the plx filename for single-file domains. For multi-file domains, use the subdirectory name. +Note: The domain code usually matches the plx filename for single-file domains. For multi-file domains, use the subdirectory name. #### Concept Definitions @@ -62,7 +62,7 @@ For details on how to structure concepts with fields, see the "Structuring Model ### Pipe Base Definition ```plx -[pipe.your_pipe_name] +[pipe.your_pipe_code] type = "PipeLLM" description = "A description of what your pipe does" inputs = { input_1 = "ConceptName1", input_2 = "ConceptName2" } @@ -471,7 +471,7 @@ The PipeExtract operator is used to extract text and images from an image or a P [pipe.extract_info] type = "PipeExtract" description = "extract the information" -inputs = { document = "PDF" } # or { image = "Image" } if it's an image. This is the only input. +inputs = { document = "Document" } # or { image = "Image" } if it's an image. This is the only input. 
output = "Page" ``` @@ -480,7 +480,7 @@ Using Extract Model Settings: [pipe.extract_with_model] type = "PipeExtract" description = "Extract with specific model" -inputs = { document = "PDF" } +inputs = { document = "Document" } output = "Page" model = "base_extract_mistral" # Use predefined extract preset or model alias ``` @@ -588,15 +588,16 @@ $sales_rep.phone | $sales_rep.email """ ``` -#### Key Parameters +#### Key Parameters (Template Mode) -- `template`: Inline template string (mutually exclusive with template_name) +- `template`: Inline template string (mutually exclusive with template_name and construct) - `template_name`: Name of a predefined template (mutually exclusive with template) - `template_category`: Template type ("llm_prompt", "html", "markdown", "mermaid", etc.) - `templating_style`: Styling options for template rendering - `extra_context`: Additional context variables for template For more control, you can use a nested `template` section instead of the `template` field: + - `template.template`: The template string - `template.category`: Template type - `template.templating_style`: Styling options @@ -604,9 +605,143 @@ For more control, you can use a nested `template` section instead of the `templa #### Template Variables Use the same variable insertion rules as PipeLLM: + - `@variable` for block insertion (multi-line content) - `$variable` for inline insertion (short text) +#### Construct Mode (for StructuredContent Output) + +PipeCompose can also generate `StructuredContent` objects using the `construct` section. This mode composes field values from fixed values, variable references, templates, or nested structures. + +**When to use construct mode:** + +- You need to output a structured object (not just Text) +- You want to deterministically compose fields from existing data +- No LLM is needed - just data composition and templating + +##### Basic Construct Usage + +```plx +[concept.SalesSummary] +description = "A structured sales summary" + +[concept.SalesSummary.structure] +report_title = { type = "text", description = "Title of the report" } +customer_name = { type = "text", description = "Customer name" } +deal_value = { type = "number", description = "Deal value" } +summary_text = { type = "text", description = "Generated summary text" } + +[pipe.compose_summary] +type = "PipeCompose" +description = "Compose a sales summary from deal data" +inputs = { deal = "Deal" } +output = "SalesSummary" + +[pipe.compose_summary.construct] +report_title = "Monthly Sales Report" +customer_name = { from = "deal.customer_name" } +deal_value = { from = "deal.amount" } +summary_text = { template = "Deal worth $deal.amount with $deal.customer_name" } +``` + +##### Field Composition Methods + +There are four ways to define field values in a construct: + +**1. Fixed Value (literal)** + +Use a literal value directly: + +```plx +[pipe.compose_report.construct] +report_title = "Annual Report" +report_year = 2024 +is_draft = false +``` + +**2. Variable Reference (`from`)** + +Get a value from working memory using a dotted path: + +```plx +[pipe.compose_report.construct] +customer_name = { from = "deal.customer_name" } +total_amount = { from = "order.total" } +street_address = { from = "customer.address.street" } +``` + +**3. 
Template (`template`)** + +Render a Jinja2 template with variable substitution: + +```plx +[pipe.compose_report.construct] +invoice_number = { template = "INV-$order.id" } +summary = { template = "Deal worth $deal.amount with $deal.customer_name on {{ current_date }}" } +``` + +**4. Nested Construct** + +For nested structures, use a TOML subsection: + +```plx +[pipe.compose_invoice.construct] +invoice_number = { template = "INV-$order.id" } +total = { from = "order.total_amount" } + +[pipe.compose_invoice.construct.billing_address] +street = { from = "customer.address.street" } +city = { from = "customer.address.city" } +country = "France" +``` + +##### Complete Construct Example + +```plx +domain = "invoicing" + +[concept.Address] +description = "A postal address" + +[concept.Address.structure] +street = { type = "text", description = "Street address" } +city = { type = "text", description = "City name" } +country = { type = "text", description = "Country name" } + +[concept.Invoice] +description = "An invoice document" + +[concept.Invoice.structure] +invoice_number = { type = "text", description = "Invoice number" } +total = { type = "number", description = "Total amount" } + +[pipe.compose_invoice] +type = "PipeCompose" +description = "Compose an invoice from order and customer data" +inputs = { order = "Order", customer = "Customer" } +output = "Invoice" + +[pipe.compose_invoice.construct] +invoice_number = { template = "INV-$order.id" } +total = { from = "order.total_amount" } + +[pipe.compose_invoice.construct.billing_address] +street = { from = "customer.address.street" } +city = { from = "customer.address.city" } +country = "France" +``` + +##### Key Parameters (Construct Mode) + +- `construct`: Dictionary mapping field names to their composition rules +- Each field can be: + - A literal value (string, number, boolean) + - A dict with `from` key for variable reference + - A dict with `template` key for template rendering + - A nested dict for nested structures + +**Note:** You must use either `template` or `construct`, not both. They are mutually exclusive. + ### PipeImgGen operator The PipeImgGen operator is used to generate images using AI image generation models. @@ -952,13 +1087,13 @@ So here are a few concrete examples of calls to execute_pipeline with various wa }, ) -## Here we have a single input and it's a PDF. -## Because PDFContent is a native concept, we can use it directly as a value, +## Here we have a single input and it's a document. +## Because DocumentContent is a native concept, we can use it directly as a value, ## the system knows what content it corresponds to: pipe_output = await execute_pipeline( pipe_code="power_extractor_dpe", inputs={ - "document": PDFContent(url=pdf_url), + "document": DocumentContent(url=pdf_url), }, ) @@ -1081,82 +1216,4 @@ result_list = pipe_output.main_stuff_as_items(item_type=GanttChart) ``` --- - -## Rules to choose LLM models used in PipeLLMs. - -### LLM Configuration System - -In order to use it in a pipe, an LLM is referenced by its llm_handle (alias) and possibly by an llm_preset. -LLM configurations are managed through the new inference backend system with files located in `.pipelex/inference/`: - -- **Model Deck**: `.pipelex/inference/deck/base_deck.toml` and `.pipelex/inference/deck/overrides.toml` -- **Backends**: `.pipelex/inference/backends.toml` and `.pipelex/inference/backends/*.toml` -- **Routing**: `.pipelex/inference/routing_profiles.toml` - -### LLM Handles - -An llm_handle can be either: -1. 
**A direct model name** (like "gpt-4o-mini", "claude-3-sonnet") - automatically available for all models loaded by the inference backend system -2. **An alias** - user-defined shortcuts that map to model names, defined in the `[aliases]` section: - -```toml -[aliases] -base-claude = "claude-4.5-sonnet" -base-gpt = "gpt-5" -base-gemini = "gemini-2.5-flash" -base-mistral = "mistral-medium" -``` - -The system first looks for direct model names, then checks aliases if no direct match is found. The system handles model routing through backends automatically. - -### Using an LLM Handle in a PipeLLM - -Here is an example of using an llm_handle to specify which LLM to use in a PipeLLM: - -```plx -[pipe.hello_world] -type = "PipeLLM" -description = "Write text about Hello World." -output = "Text" -model = { model = "gpt-5", temperature = 0.9 } -prompt = """ -Write a haiku about Hello World. -""" -``` - -As you can see, to use the LLM, you must also indicate the temperature (float between 0 and 1) and max_tokens (either an int or the string "auto"). - -### LLM Presets - -Presets are meant to record the choice of an llm with its hyper parameters (temperature and max_tokens) if it's good for a particular task. LLM Presets are skill-oriented. - -Examples: -```toml -llm_to_engineer = { model = "base-claude", temperature = 1 } -llm_to_extract_invoice = { model = "claude-4.5-sonnet", temperature = 0.1, max_tokens = "auto" } -``` - -The interest is that these presets can be used to set the LLM choice in a PipeLLM, like this: - -```plx -[pipe.extract_invoice] -type = "PipeLLM" -description = "Extract invoice information from an invoice text transcript" -inputs = { invoice_text = "InvoiceText" } -output = "Invoice" -model = "llm_to_extract_invoice" -prompt = """ -Extract invoice information from this invoice: - -The category of this invoice is: $invoice_details.category. - -@invoice_text -""" -``` - -The setting here `model = "llm_to_extract_invoice"` works because "llm_to_extract_invoice" has been declared as an llm_preset in the deck. -You must not use an LLM preset in a PipeLLM that does not exist in the deck. If needed, you can add llm presets. - - -You can override the predefined llm presets by setting them in `.pipelex/inference/deck/overrides.toml`. diff --git a/CLAUDE.md b/CLAUDE.md index 6125caa..af4572e 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -23,10 +23,10 @@ A pipeline file has three main sections: #### Domain Statement ```plx -domain = "domain_name" +domain = "domain_code" description = "Description of the domain" # Optional ``` -Note: The domain name usually matches the plx filename for single-file domains. For multi-file domains, use the subdirectory name. +Note: The domain code usually matches the plx filename for single-file domains. For multi-file domains, use the subdirectory name. #### Concept Definitions @@ -62,7 +62,7 @@ For details on how to structure concepts with fields, see the "Structuring Model ### Pipe Base Definition ```plx -[pipe.your_pipe_name] +[pipe.your_pipe_code] type = "PipeLLM" description = "A description of what your pipe does" inputs = { input_1 = "ConceptName1", input_2 = "ConceptName2" } @@ -471,7 +471,7 @@ The PipeExtract operator is used to extract text and images from an image or a P [pipe.extract_info] type = "PipeExtract" description = "extract the information" -inputs = { document = "PDF" } # or { image = "Image" } if it's an image. This is the only input. +inputs = { document = "Document" } # or { image = "Image" } if it's an image. This is the only input. 
output = "Page" ``` @@ -480,7 +480,7 @@ Using Extract Model Settings: [pipe.extract_with_model] type = "PipeExtract" description = "Extract with specific model" -inputs = { document = "PDF" } +inputs = { document = "Document" } output = "Page" model = "base_extract_mistral" # Use predefined extract preset or model alias ``` @@ -588,15 +588,16 @@ $sales_rep.phone | $sales_rep.email """ ``` -#### Key Parameters +#### Key Parameters (Template Mode) -- `template`: Inline template string (mutually exclusive with template_name) +- `template`: Inline template string (mutually exclusive with template_name and construct) - `template_name`: Name of a predefined template (mutually exclusive with template) - `template_category`: Template type ("llm_prompt", "html", "markdown", "mermaid", etc.) - `templating_style`: Styling options for template rendering - `extra_context`: Additional context variables for template For more control, you can use a nested `template` section instead of the `template` field: + - `template.template`: The template string - `template.category`: Template type - `template.templating_style`: Styling options @@ -604,9 +605,143 @@ For more control, you can use a nested `template` section instead of the `templa #### Template Variables Use the same variable insertion rules as PipeLLM: + - `@variable` for block insertion (multi-line content) - `$variable` for inline insertion (short text) +#### Construct Mode (for StructuredContent Output) + +PipeCompose can also generate `StructuredContent` objects using the `construct` section. This mode composes field values from fixed values, variable references, templates, or nested structures. + +**When to use construct mode:** + +- You need to output a structured object (not just Text) +- You want to deterministically compose fields from existing data +- No LLM is needed - just data composition and templating + +##### Basic Construct Usage + +```plx +[concept.SalesSummary] +description = "A structured sales summary" + +[concept.SalesSummary.structure] +report_title = { type = "text", description = "Title of the report" } +customer_name = { type = "text", description = "Customer name" } +deal_value = { type = "number", description = "Deal value" } +summary_text = { type = "text", description = "Generated summary text" } + +[pipe.compose_summary] +type = "PipeCompose" +description = "Compose a sales summary from deal data" +inputs = { deal = "Deal" } +output = "SalesSummary" + +[pipe.compose_summary.construct] +report_title = "Monthly Sales Report" +customer_name = { from = "deal.customer_name" } +deal_value = { from = "deal.amount" } +summary_text = { template = "Deal worth $deal.amount with $deal.customer_name" } +``` + +##### Field Composition Methods + +There are four ways to define field values in a construct: + +**1. Fixed Value (literal)** + +Use a literal value directly: + +```plx +[pipe.compose_report.construct] +report_title = "Annual Report" +report_year = 2024 +is_draft = false +``` + +**2. Variable Reference (`from`)** + +Get a value from working memory using a dotted path: + +```plx +[pipe.compose_report.construct] +customer_name = { from = "deal.customer_name" } +total_amount = { from = "order.total" } +street_address = { from = "customer.address.street" } +``` + +**3. 
Template (`template`)** + +Render a Jinja2 template with variable substitution: + +```plx +[pipe.compose_report.construct] +invoice_number = { template = "INV-$order.id" } +summary = { template = "Deal worth $deal.amount with $deal.customer_name on {{ current_date }}" } +``` + +**4. Nested Construct** + +For nested structures, use a TOML subsection: + +```plx +[pipe.compose_invoice.construct] +invoice_number = { template = "INV-$order.id" } +total = { from = "order.total_amount" } + +[pipe.compose_invoice.construct.billing_address] +street = { from = "customer.address.street" } +city = { from = "customer.address.city" } +country = "France" +``` + +##### Complete Construct Example + +```plx +domain = "invoicing" + +[concept.Address] +description = "A postal address" + +[concept.Address.structure] +street = { type = "text", description = "Street address" } +city = { type = "text", description = "City name" } +country = { type = "text", description = "Country name" } + +[concept.Invoice] +description = "An invoice document" + +[concept.Invoice.structure] +invoice_number = { type = "text", description = "Invoice number" } +total = { type = "number", description = "Total amount" } + +[pipe.compose_invoice] +type = "PipeCompose" +description = "Compose an invoice from order and customer data" +inputs = { order = "Order", customer = "Customer" } +output = "Invoice" + +[pipe.compose_invoice.construct] +invoice_number = { template = "INV-$order.id" } +total = { from = "order.total_amount" } + +[pipe.compose_invoice.construct.billing_address] +street = { from = "customer.address.street" } +city = { from = "customer.address.city" } +country = "France" +``` + +##### Key Parameters (Construct Mode) + +- `construct`: Dictionary mapping field names to their composition rules +- Each field can be: + - A literal value (string, number, boolean) + - A dict with `from` key for variable reference + - A dict with `template` key for template rendering + - A nested dict for nested structures + +**Note:** You must use either `template` or `construct`, not both. They are mutually exclusive. + ### PipeImgGen operator The PipeImgGen operator is used to generate images using AI image generation models. @@ -952,13 +1087,13 @@ So here are a few concrete examples of calls to execute_pipeline with various wa }, ) -## Here we have a single input and it's a PDF. -## Because PDFContent is a native concept, we can use it directly as a value, +## Here we have a single input and it's a document. +## Because DocumentContent is a native concept, we can use it directly as a value, ## the system knows what content it corresponds to: pipe_output = await execute_pipeline( pipe_code="power_extractor_dpe", inputs={ - "document": PDFContent(url=pdf_url), + "document": DocumentContent(url=pdf_url), }, ) @@ -1081,82 +1216,4 @@ result_list = pipe_output.main_stuff_as_items(item_type=GanttChart) ``` --- - -## Rules to choose LLM models used in PipeLLMs. - -### LLM Configuration System - -In order to use it in a pipe, an LLM is referenced by its llm_handle (alias) and possibly by an llm_preset. -LLM configurations are managed through the new inference backend system with files located in `.pipelex/inference/`: - -- **Model Deck**: `.pipelex/inference/deck/base_deck.toml` and `.pipelex/inference/deck/overrides.toml` -- **Backends**: `.pipelex/inference/backends.toml` and `.pipelex/inference/backends/*.toml` -- **Routing**: `.pipelex/inference/routing_profiles.toml` - -### LLM Handles - -An llm_handle can be either: -1. 
**A direct model name** (like "gpt-4o-mini", "claude-3-sonnet") - automatically available for all models loaded by the inference backend system -2. **An alias** - user-defined shortcuts that map to model names, defined in the `[aliases]` section: - -```toml -[aliases] -base-claude = "claude-4.5-sonnet" -base-gpt = "gpt-5" -base-gemini = "gemini-2.5-flash" -base-mistral = "mistral-medium" -``` - -The system first looks for direct model names, then checks aliases if no direct match is found. The system handles model routing through backends automatically. - -### Using an LLM Handle in a PipeLLM - -Here is an example of using an llm_handle to specify which LLM to use in a PipeLLM: - -```plx -[pipe.hello_world] -type = "PipeLLM" -description = "Write text about Hello World." -output = "Text" -model = { model = "gpt-5", temperature = 0.9 } -prompt = """ -Write a haiku about Hello World. -""" -``` - -As you can see, to use the LLM, you must also indicate the temperature (float between 0 and 1) and max_tokens (either an int or the string "auto"). - -### LLM Presets - -Presets are meant to record the choice of an llm with its hyper parameters (temperature and max_tokens) if it's good for a particular task. LLM Presets are skill-oriented. - -Examples: -```toml -llm_to_engineer = { model = "base-claude", temperature = 1 } -llm_to_extract_invoice = { model = "claude-4.5-sonnet", temperature = 0.1, max_tokens = "auto" } -``` - -The interest is that these presets can be used to set the LLM choice in a PipeLLM, like this: - -```plx -[pipe.extract_invoice] -type = "PipeLLM" -description = "Extract invoice information from an invoice text transcript" -inputs = { invoice_text = "InvoiceText" } -output = "Invoice" -model = "llm_to_extract_invoice" -prompt = """ -Extract invoice information from this invoice: - -The category of this invoice is: $invoice_details.category. - -@invoice_text -""" -``` - -The setting here `model = "llm_to_extract_invoice"` works because "llm_to_extract_invoice" has been declared as an llm_preset in the deck. -You must not use an LLM preset in a PipeLLM that does not exist in the deck. If needed, you can add llm presets. - - -You can override the predefined llm presets by setting them in `.pipelex/inference/deck/overrides.toml`. diff --git a/crazy/__init__.py b/crazy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/crazy/bundle.plx b/crazy/bundle.plx deleted file mode 100644 index af706cd..0000000 --- a/crazy/bundle.plx +++ /dev/null @@ -1,42 +0,0 @@ -domain = "crazy_image_generation" -description = "Imagining and rendering absurd, funny images with unexpected surreal elements" -main_pipe = "generate_crazy_image" - -[concept.ImagePrompt] -description = """ -A detailed textual description of a scene to be used as input for an image generation model, including subjects, setting, style, and visual details. 
diff --git a/crazy/__init__.py b/crazy/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/crazy/bundle.plx b/crazy/bundle.plx
deleted file mode 100644
index af706cd..0000000
--- a/crazy/bundle.plx
+++ /dev/null
@@ -1,42 +0,0 @@
-domain = "crazy_image_generation"
-description = "Imagining and rendering absurd, funny images with unexpected surreal elements"
-main_pipe = "generate_crazy_image"
-
-[concept.ImagePrompt]
-description = """
-A detailed textual description of a scene to be used as input for an image generation model, including subjects, setting, style, and visual details.
-"""
-refines = "Text"
-
-[pipe.generate_crazy_image]
-type = "PipeSequence"
-description = """
-Main pipeline that orchestrates the full crazy image generation flow - imagines a wild, absurd scene concept and renders it as an image
-"""
-output = "Image"
-steps = [
-    { pipe = "imagine_scene", result = "image_prompt" },
-    { pipe = "render_image", result = "crazy_image" },
-]
-
-[pipe.imagine_scene]
-type = "PipeLLM"
-description = """
-Generates a creative, absurd, and hilarious image concept combining unexpected elements in surreal ways - think flying spaghetti monsters, penguins in business suits at a disco, or a T-Rex doing yoga on the moon
-"""
-output = "ImagePrompt"
-model = "cheap_llm_for_creativity"
-system_prompt = """
-You are a wildly creative visual concept artist specializing in absurd, surreal, and hilarious imagery. Your task is to generate a structured image prompt that combines unexpected elements in surprising and funny ways. Think outside the box - the more unexpected the combination, the better!
-"""
-prompt = """
-Generate a creative, absurd, and funny image concept. Combine unexpected elements in a surreal way that would make viewers laugh or do a double-take. Be VERY concise and focus on vivid, specific visual details that work well for image generation.
-"""
-
-[pipe.render_image]
-type = "PipeImgGen"
-description = "Generates the absurd image based on the creative scene description"
-inputs = { image_prompt = "ImagePrompt" }
-output = "Image"
-prompt = "$image_prompt"
-model = "gen_image_high_quality"
diff --git a/crazy/bundle_view.html b/crazy/bundle_view.html
deleted file mode 100644
index 0e294d7..0000000
--- a/crazy/bundle_view.html
+++ /dev/null
@@ -1,111 +0,0 @@
-
-
-
-
-
-
-
-
Domain: crazy_image_generation
-
-Description: Imagining and rendering absurd, funny images with unexpected surreal elements
-
-Main Pipe: generate_crazy_image
-
-
-
-                                              Concepts                                              
-┌──────────────────────────────────────────────────────────────────────────────────────────────────┐
- Concept: ImagePrompt                                                                             
- Refines: Text                                                                                    
-                                                                                                  
- Description: A detailed textual description of a scene to be used as input for an image          
- generation model, including subjects, setting, style, and visual details.                        
-                                                                                                  
-└──────────────────────────────────────────────────────────────────────────────────────────────────┘
-
-
-                                               Pipes                                                
-┌──────────────────────────────────────────────────────────────────────────────────────────────────┐
- Pipe: generate_crazy_image                                                                       
-                                                                                                  
- Type: PipeSequence (PipeController)                                                              
-                                                                                                  
- Description: Main pipeline that orchestrates the full crazy image generation flow - imagines a   
- wild, absurd scene concept and renders it as an image                                            
-                                                                                                  
-                                                                                                  
- No inputs                                                                                        
-                                                                                                  
- Output: Image                                                                                    
-                                                                                                  
- Sequence Steps:                                                                                  
- ┏━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┓                                                          
- ┃ Step ┃ Pipe          ┃ Result name  ┃                                                          
- ┡━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━┩                                                          
- │    1 │ imagine_scene │ image_prompt │                                                          
- ├──────┼───────────────┼──────────────┤                                                          
- │    2 │ render_image  │ crazy_image  │                                                          
- └──────┴───────────────┴──────────────┘                                                          
-├──────────────────────────────────────────────────────────────────────────────────────────────────┤
- Pipe: imagine_scene                                                                              
-                                                                                                  
- Type: PipeLLM (PipeOperator)                                                                     
-                                                                                                  
- Description: Generates a creative, absurd, and hilarious image concept combining unexpected      
- elements in surreal ways - think flying spaghetti monsters, penguins in business suits at a      
- disco, or a T-Rex doing yoga on the moon                                                         
-                                                                                                  
-                                                                                                  
- No inputs                                                                                        
-                                                                                                  
- Output: ImagePrompt                                                                              
-                                                                                                  
- LLM Skill: cheap_llm_for_creativity                                                              
-                                                                                                  
- ╭─ System Prompt ──────────────────────────────────────────────────────────────────────────────╮ 
-  You are a wildly creative visual concept artist specializing in absurd, surreal, and          
-  hilarious imagery. Your task is to generate a structured image prompt that combines           
-  unexpected elements in surprising and funny ways. Think outside the box - the more            
-  unexpected the combination, the better!                                                       
- ╰──────────────────────────────────────────────────────────────────────────────────────────────╯ 
-                                                                                                  
- ╭─ Prompt ─────────────────────────────────────────────────────────────────────────────────────╮ 
-  Generate a creative, absurd, and funny image concept. Combine unexpected elements in a        
-  surreal way that would make viewers laugh or do a double-take. Be VERY concise and focus on   
-  vivid, specific visual details that work well for image generation.                           
- ╰──────────────────────────────────────────────────────────────────────────────────────────────╯ 
-├──────────────────────────────────────────────────────────────────────────────────────────────────┤
- Pipe: render_image                                                                               
-                                                                                                  
- Type: PipeImgGen (PipeOperator)                                                                  
-                                                                                                  
- Description: Generates the absurd image based on the creative scene description                  
-                                                                                                  
-                                                                                                  
- Input: image_prompt (ImagePrompt)                                                                
-                                                                                                  
- Output: Image                                                                                    
-                                                                                                  
- Image Generation Skill: gen_image_high_quality                                                   
-└──────────────────────────────────────────────────────────────────────────────────────────────────┘
-
-
-
diff --git a/crazy/bundle_view.svg b/crazy/bundle_view.svg
deleted file mode 100644
index a7dbf52..0000000
--- a/crazy/bundle_view.svg
+++ /dev/null
@@ -1,397 +0,0 @@
-[bundle_view.svg: 397 lines of SVG markup, garbled during extraction; it was a vector export of the same Rich bundle view as bundle_view.html above]
diff --git a/crazy/inputs.json b/crazy/inputs.json
deleted file mode 100644
index 9e26dfe..0000000
--- a/crazy/inputs.json
+++ /dev/null
@@ -1 +0,0 @@
-{}
\ No newline at end of file
diff --git a/crazy/run_generate_crazy_image.py b/crazy/run_generate_crazy_image.py
deleted file mode 100644
index 2d0f24d..0000000
--- a/crazy/run_generate_crazy_image.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import asyncio
-
-from pipelex.core.stuffs.image_content import ImageContent
-from pipelex.pipelex import Pipelex
-from pipelex.pipeline.execute import execute_pipeline
-
-
-async def run_generate_crazy_image() -> ImageContent:
-    pipe_output = await execute_pipeline(
-        pipe_code="generate_crazy_image",
-    )
-    return pipe_output.main_stuff_as(content_type=ImageContent)
-
-
-if __name__ == "__main__":
-    # Initialize Pipelex
-    with Pipelex.make():
-        # Run the pipeline
-        result = asyncio.run(run_generate_crazy_image())
diff --git a/crazy/structures/__init__.py b/crazy/structures/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/crazy/structures/crazy_image_generation_ImagePrompt.py b/crazy/structures/crazy_image_generation_ImagePrompt.py
deleted file mode 100644
index 46b45ae..0000000
--- a/crazy/structures/crazy_image_generation_ImagePrompt.py
+++ /dev/null
@@ -1,22 +0,0 @@
-"""
-AUTOGENERATED CODE - DO NOT EDIT
-
-If you want to customize this structure:
-  1. Copy this file to your own module
-  2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
-     and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
-  3. 
Make sure your custom class is importable and registered - -To regenerate: pipelex build structures -""" - -from enum import Enum -from typing import Any, Dict, List, Literal, Optional - -from pipelex.core.stuffs.structured_content import StructuredContent -from pipelex.core.stuffs.text_content import TextContent -from pydantic import Field - - -class ImagePrompt(TextContent): - """Generated ImagePrompt class""" From 98d422075a55eeb611066fbdf6e36216576ffa78 Mon Sep 17 00:00:00 2001 From: Louis Choquel Date: Mon, 19 Jan 2026 11:56:45 +0100 Subject: [PATCH 04/10] Pipelex dep --- pyproject.toml | 2 +- uv.lock | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index c8bb40a..7fbc63f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,7 +22,7 @@ dependencies = [ ] [tool.uv.sources] -pipelex = { git = "https://github.com/Pipelex/pipelex.git", branch = "feature/Chicago" } +pipelex = { git = "https://github.com/Pipelex/pipelex.git", branch = "pre-release/v0.18.0b1" } [tool.setuptools] diff --git a/uv.lock b/uv.lock index 87b7dfc..9e419e7 100644 --- a/uv.lock +++ b/uv.lock @@ -1480,8 +1480,8 @@ dev = [ requires-dist = [ { name = "boto3-stubs", marker = "extra == 'dev'", specifier = ">=1.35.24" }, { name = "mypy", marker = "extra == 'dev'", specifier = ">=1.11.2" }, - { name = "pipelex", git = "https://github.com/Pipelex/pipelex.git?branch=feature%2FChicago" }, - { name = "pipelex", extras = ["mistralai", "anthropic", "google", "google-genai", "bedrock", "fal"], git = "https://github.com/Pipelex/pipelex.git?branch=feature%2FChicago" }, + { name = "pipelex", git = "https://github.com/Pipelex/pipelex.git?branch=pre-release%2Fv0.18.0b1" }, + { name = "pipelex", extras = ["mistralai", "anthropic", "google", "google-genai", "bedrock", "fal"], git = "https://github.com/Pipelex/pipelex.git?branch=pre-release%2Fv0.18.0b1" }, { name = "pyright", marker = "extra == 'dev'", specifier = ">=1.1.405" }, { name = "pytest", marker = "extra == 'dev'", specifier = ">=9.0.1" }, { name = "pytest-asyncio", marker = "extra == 'dev'", specifier = ">=0.24.0" }, @@ -1961,8 +1961,8 @@ wheels = [ [[package]] name = "pipelex" -version = "0.17.3" -source = { git = "https://github.com/Pipelex/pipelex.git?branch=feature%2FChicago#88f1cefb1901f0759e91b06a2ba2f580291a29f2" } +version = "0.18.0b1" +source = { git = "https://github.com/Pipelex/pipelex.git?branch=pre-release%2Fv0.18.0b1#c056b6a7015c4c7c96656e4615578277629e8a5e" } dependencies = [ { name = "aiofiles" }, { name = "backports-strenum", marker = "python_full_version < '3.11'" }, From 6e09ecf99fdae48d846b47f7bbb6307aa4374229 Mon Sep 17 00:00:00 2001 From: Louis Choquel Date: Mon, 19 Jan 2026 14:46:38 +0100 Subject: [PATCH 05/10] CI --disable-inference to run without Pipelex Service agreement --- Makefile | 4 ++-- tests/integration/conftest.py | 8 ++++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index b3f7eb7..bb1c61f 100644 --- a/Makefile +++ b/Makefile @@ -209,12 +209,12 @@ cleanall: cleanderived cleanenv cleanconfig codex-tests: env $(call PRINT_TITLE,"Unit testing for Codex") @echo "• Running unit tests for Codex (excluding inference and codex_disabled)" - $(VENV_PYTEST) --exitfirst --quiet -m "not inference and not codex_disabled" || [ $$? = 5 ] + $(VENV_PYTEST) --disable-inference --exitfirst --quiet -m "not inference and not codex_disabled" || [ $$? 
= 5 ] gha-tests: env $(call PRINT_TITLE,"Unit testing for github actions") @echo "• Running unit tests for github actions (excluding inference and gha_disabled)" - $(VENV_PYTEST) --exitfirst --quiet -m "not inference and not gha_disabled" || [ $$? = 5 ] + $(VENV_PYTEST) --disable-inference --exitfirst --quiet -m "not inference and not gha_disabled" || [ $$? = 5 ] run-all-tests: env $(call PRINT_TITLE,"Running all unit tests") diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 028da5f..ab98a37 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -2,6 +2,8 @@ import pipelex.pipelex import pytest from pipelex.system.configuration.config_check import check_is_initialized +from pipelex.test_extras.shared_pytest_plugins import is_inference_disabled_in_pipelex +from pytest import FixtureRequest from rich import print from rich.console import Console from rich.traceback import Traceback @@ -15,11 +17,13 @@ def check_pipelex_initialized(): @pytest.fixture(scope="module", autouse=True) -def reset_pipelex_config_fixture(): +def reset_pipelex_config_fixture(request: FixtureRequest): # Code to run before each test print("\n[magenta]pipelex setup[/magenta]") try: - pipelex_instance = pipelex.pipelex.Pipelex.make() + pipelex_instance = pipelex.pipelex.Pipelex.make( + disable_inference=is_inference_disabled_in_pipelex(request), + ) except Exception as exc: Console().print(Traceback()) pytest.exit(f"Critical Pipelex setup error: {exc}") From 9451bc7f6c36d7ee6e4a389a510a4e86f6080515 Mon Sep 17 00:00:00 2001 From: Louis Choquel Date: Mon, 19 Jan 2026 14:52:17 +0100 Subject: [PATCH 06/10] Removed the separate "Boot test" step since gha-tests already runs all tests (including TestFundamentals) with the --disable-inference flag. The boot test was redundant - it was running the same tests twice, just with a filter. 
--- .github/workflows/tests-check.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/tests-check.yml b/.github/workflows/tests-check.yml index c94ad16..a347c29 100644 --- a/.github/workflows/tests-check.yml +++ b/.github/workflows/tests-check.yml @@ -43,8 +43,5 @@ jobs: source .venv/bin/activate echo -e "y\nA\n1" |pipelex init - - name: Boot test - run: make tp TEST=TestFundamentals - - name: Run tests run: make gha-tests From 4103dba77c3505543823447be778930aa59734df Mon Sep 17 00:00:00 2001 From: Louis Choquel Date: Wed, 21 Jan 2026 12:08:34 +0100 Subject: [PATCH 07/10] Use Chicago b2 --- pyproject.toml | 2 +- uv.lock | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 7fbc63f..4d6f0d4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,7 +22,7 @@ dependencies = [ ] [tool.uv.sources] -pipelex = { git = "https://github.com/Pipelex/pipelex.git", branch = "pre-release/v0.18.0b1" } +pipelex = { git = "https://github.com/Pipelex/pipelex.git", branch = "pre-release/v0.18.0b2" } [tool.setuptools] diff --git a/uv.lock b/uv.lock index 9e419e7..ebce1e5 100644 --- a/uv.lock +++ b/uv.lock @@ -1480,8 +1480,8 @@ dev = [ requires-dist = [ { name = "boto3-stubs", marker = "extra == 'dev'", specifier = ">=1.35.24" }, { name = "mypy", marker = "extra == 'dev'", specifier = ">=1.11.2" }, - { name = "pipelex", git = "https://github.com/Pipelex/pipelex.git?branch=pre-release%2Fv0.18.0b1" }, - { name = "pipelex", extras = ["mistralai", "anthropic", "google", "google-genai", "bedrock", "fal"], git = "https://github.com/Pipelex/pipelex.git?branch=pre-release%2Fv0.18.0b1" }, + { name = "pipelex", git = "https://github.com/Pipelex/pipelex.git?branch=pre-release%2Fv0.18.0b2" }, + { name = "pipelex", extras = ["mistralai", "anthropic", "google", "google-genai", "bedrock", "fal"], git = "https://github.com/Pipelex/pipelex.git?branch=pre-release%2Fv0.18.0b2" }, { name = "pyright", marker = "extra == 'dev'", specifier = ">=1.1.405" }, { name = "pytest", marker = "extra == 'dev'", specifier = ">=9.0.1" }, { name = "pytest-asyncio", marker = "extra == 'dev'", specifier = ">=0.24.0" }, @@ -1961,8 +1961,8 @@ wheels = [ [[package]] name = "pipelex" -version = "0.18.0b1" -source = { git = "https://github.com/Pipelex/pipelex.git?branch=pre-release%2Fv0.18.0b1#c056b6a7015c4c7c96656e4615578277629e8a5e" } +version = "0.18.0b2" +source = { git = "https://github.com/Pipelex/pipelex.git?branch=pre-release%2Fv0.18.0b2#06b33904bd697791e058c7bc5b43454d96839f46" } dependencies = [ { name = "aiofiles" }, { name = "backports-strenum", marker = "python_full_version < '3.11'" }, From 497c8cebb49996db93a773b1fe2d8ed3906c56ef Mon Sep 17 00:00:00 2001 From: Louis Choquel Date: Wed, 21 Jan 2026 16:45:17 +0100 Subject: [PATCH 08/10] git ignores --- .gitignore | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 29670a3..443da68 100644 --- a/.gitignore +++ b/.gitignore @@ -27,7 +27,9 @@ dist/ # Results results/ -# temps temp/ pipelex_super.toml +pipelex_override.toml +telemetry_override.toml base_llm_deck.toml +.pipelex/storage From 9125497ad4566afaf8463a49110794a58e86abda Mon Sep 17 00:00:00 2001 From: Louis Choquel Date: Wed, 21 Jan 2026 16:49:07 +0100 Subject: [PATCH 09/10] Pipelex version dep --- pyproject.toml | 5 +---- uv.lock | 1 - 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 4d6f0d4..382089a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,10 
+16,7 @@ classifiers = [ "Operating System :: OS Independent", ] -dependencies = [ - "pipelex[mistralai,anthropic,google,google-genai,bedrock,fal]", - "pipelex==0.17.3", -] +dependencies = ["pipelex[mistralai,anthropic,google,google-genai,bedrock,fal]"] [tool.uv.sources] pipelex = { git = "https://github.com/Pipelex/pipelex.git", branch = "pre-release/v0.18.0b2" } diff --git a/uv.lock b/uv.lock index ebce1e5..e33af4a 100644 --- a/uv.lock +++ b/uv.lock @@ -1480,7 +1480,6 @@ dev = [ requires-dist = [ { name = "boto3-stubs", marker = "extra == 'dev'", specifier = ">=1.35.24" }, { name = "mypy", marker = "extra == 'dev'", specifier = ">=1.11.2" }, - { name = "pipelex", git = "https://github.com/Pipelex/pipelex.git?branch=pre-release%2Fv0.18.0b2" }, { name = "pipelex", extras = ["mistralai", "anthropic", "google", "google-genai", "bedrock", "fal"], git = "https://github.com/Pipelex/pipelex.git?branch=pre-release%2Fv0.18.0b2" }, { name = "pyright", marker = "extra == 'dev'", specifier = ">=1.1.405" }, { name = "pytest", marker = "extra == 'dev'", specifier = ">=9.0.1" }, From 2cb68b842044c72e0995ad9e74e81bd5a9afb88e Mon Sep 17 00:00:00 2001 From: Louis Choquel Date: Thu, 12 Feb 2026 00:24:44 +0100 Subject: [PATCH 10/10] Update for Chicago b3 + some cleanup --- .vscode/settings.json | 1 - pyproject.toml | 4 +- uv.lock | 126 ++++++++++++++++++++---------------------- 3 files changed, 61 insertions(+), 70 deletions(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index e908f73..acd31dc 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -7,7 +7,6 @@ "mypy.runUsingActiveInterpreter": true, "search.exclude": { ".mypy_cache/*": true, - "**/dev_context": true }, "files.exclude": { "**/__pycache__": true, diff --git a/pyproject.toml b/pyproject.toml index 382089a..fbb9661 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ description = "Replace this with your project description" # authors = [{ name = "Your Name", email = "your.email@example.com" }] license = "MIT" readme = "README.md" -requires-python = ">=3.10" +requires-python = ">=3.10,<3.15" classifiers = [ "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", @@ -19,7 +19,7 @@ classifiers = [ dependencies = ["pipelex[mistralai,anthropic,google,google-genai,bedrock,fal]"] [tool.uv.sources] -pipelex = { git = "https://github.com/Pipelex/pipelex.git", branch = "pre-release/v0.18.0b2" } +pipelex = { git = "https://github.com/Pipelex/pipelex.git", branch = "pre-release/v0.18.0b3" } [tool.setuptools] diff --git a/uv.lock b/uv.lock index e33af4a..44c8eb5 100644 --- a/uv.lock +++ b/uv.lock @@ -1,6 +1,6 @@ version = 1 revision = 3 -requires-python = ">=3.10" +requires-python = ">=3.10, <3.15" resolution-markers = [ "python_full_version >= '3.11'", "python_full_version < '3.11'", @@ -213,7 +213,7 @@ wheels = [ [[package]] name = "anthropic" -version = "0.75.0" +version = "0.79.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -225,9 +225,9 @@ dependencies = [ { name = "sniffio" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/04/1f/08e95f4b7e2d35205ae5dcbb4ae97e7d477fc521c275c02609e2931ece2d/anthropic-0.75.0.tar.gz", hash = "sha256:e8607422f4ab616db2ea5baacc215dd5f028da99ce2f022e33c7c535b29f3dfb", size = 439565, upload-time = "2025-11-24T20:41:45.28Z" } +sdist = { url = "https://files.pythonhosted.org/packages/15/b1/91aea3f8fd180d01d133d931a167a78a3737b3fd39ccef2ae8d6619c24fd/anthropic-0.79.0.tar.gz", 
hash = "sha256:8707aafb3b1176ed6c13e2b1c9fb3efddce90d17aee5d8b83a86c70dcdcca871", size = 509825, upload-time = "2026-02-07T18:06:18.388Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/60/1c/1cd02b7ae64302a6e06724bf80a96401d5313708651d277b1458504a1730/anthropic-0.75.0-py3-none-any.whl", hash = "sha256:ea8317271b6c15d80225a9f3c670152746e88805a7a61e14d4a374577164965b", size = 388164, upload-time = "2025-11-24T20:41:43.587Z" }, + { url = "https://files.pythonhosted.org/packages/95/b2/cc0b8e874a18d7da50b0fda8c99e4ac123f23bf47b471827c5f6f3e4a767/anthropic-0.79.0-py3-none-any.whl", hash = "sha256:04cbd473b6bbda4ca2e41dd670fe2f829a911530f01697d0a1e37321eb75f3cf", size = 405918, upload-time = "2026-02-07T18:06:20.246Z" }, ] [[package]] @@ -539,7 +539,7 @@ name = "exceptiongroup" version = "1.3.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "typing-extensions", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, upload-time = "2025-11-21T23:01:54.787Z" } wheels = [ @@ -893,6 +893,15 @@ google-genai = [ { name = "jsonref" }, ] +[[package]] +name = "invoke" +version = "2.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/de/bd/b461d3424a24c80490313fd77feeb666ca4f6a28c7e72713e3d9095719b4/invoke-2.2.1.tar.gz", hash = "sha256:515bf49b4a48932b79b024590348da22f39c4942dff991ad1fb8b8baea1be707", size = 304762, upload-time = "2025-10-11T00:36:35.172Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/32/4b/b99e37f88336009971405cbb7630610322ed6fbfa31e1d7ab3fbf3049a2d/invoke-2.2.1-py3-none-any.whl", hash = "sha256:2413bc441b376e5cd3f55bb5d364f973ad8bdd7bf87e53c79de3c11bf3feecc8", size = 160287, upload-time = "2025-10-11T00:36:33.703Z" }, +] + [[package]] name = "jinja2" version = "3.1.6" @@ -1017,15 +1026,6 @@ version = "1.3.0" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/01/d5/40b617ee19d2d79f606ed37f8a81e51158f126d2af67270c68f2b47ae0d5/json2html-1.3.0.tar.gz", hash = "sha256:8951a53662ae9cfd812685facdba693fc950ffc1c1fd1a8a2d3cf4c34600689c", size = 6977, upload-time = "2019-07-03T20:50:03.023Z" } -[[package]] -name = "jsonpath-python" -version = "1.1.4" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/b8/bf/626a72f2d093c5eb4f4de55b443714afa7231beeae40d4a1c69b5c5aa4d1/jsonpath_python-1.1.4.tar.gz", hash = "sha256:bb3e13854e4807c078a1503ae2d87c211b8bff4d9b40b6455ed583b3b50a7fdd", size = 84766, upload-time = "2025-11-25T12:08:39.521Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ee/bc/52e5bf0d9839e082b976c19afcab7561d0d719c7627483bf5dc251d27eed/jsonpath_python-1.1.4-py3-none-any.whl", hash = "sha256:8700cb8610c44da6e5e9bff50232779c44bf7dc5bc62662d49319ee746898442", size = 12687, upload-time = "2025-11-25T12:08:38.453Z" }, -] - [[package]] name = "jsonref" version = "1.1.0" @@ -1237,19 +1237,24 @@ wheels = [ [[package]] name = "mistralai" -version = "1.5.2" +version = "1.12.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "eval-type-backport" }, { name = "httpx" }, - { name = "jsonpath-python" }, + { name 
= "invoke" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-http" }, + { name = "opentelemetry-sdk" }, + { name = "opentelemetry-semantic-conventions" }, { name = "pydantic" }, { name = "python-dateutil" }, - { name = "typing-inspect" }, + { name = "pyyaml" }, + { name = "typing-inspection" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/53/8d/88b7c48878864f37c554a131d37352a4ed0ea3918df3e8cb625407ff374a/mistralai-1.5.2.tar.gz", hash = "sha256:f39e6e51e8939aac2602e4badcb18712cbee2df33d86100c559333e609b92d17", size = 133473, upload-time = "2025-03-19T18:40:29.617Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a5/6f/6e1755c7ce73c5841ee45702770e62164299ef54818db01be1b91ca263fa/mistralai-1.12.1.tar.gz", hash = "sha256:8d8637100f7ae06c31cccb9407b1f0cd7c96005a881e7221077959577c3b4d4d", size = 242499, upload-time = "2026-02-11T09:18:42.734Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7c/97/5b428225ca4524b9722c8e1b2812c35f958ec5bb6a58c274c6c07a136da8/mistralai-1.5.2-py3-none-any.whl", hash = "sha256:5b1112acebbcad1afd7732ce0bd60614975b64999801c555c54768ac41f506ae", size = 278149, upload-time = "2025-03-19T18:40:28.232Z" }, + { url = "https://files.pythonhosted.org/packages/95/f0/81a2303a8c4cc75003c96dfacc5bd4731b2f8c1490cad6cddec1825f6d57/mistralai-1.12.1-py3-none-any.whl", hash = "sha256:045adccc3526016c951bacf8b1ee73355083b9d6a36370fce0149039ab386d56", size = 500601, upload-time = "2026-02-11T09:18:41.387Z" }, ] [[package]] @@ -1480,7 +1485,7 @@ dev = [ requires-dist = [ { name = "boto3-stubs", marker = "extra == 'dev'", specifier = ">=1.35.24" }, { name = "mypy", marker = "extra == 'dev'", specifier = ">=1.11.2" }, - { name = "pipelex", extras = ["mistralai", "anthropic", "google", "google-genai", "bedrock", "fal"], git = "https://github.com/Pipelex/pipelex.git?branch=pre-release%2Fv0.18.0b2" }, + { name = "pipelex", extras = ["mistralai", "anthropic", "google", "google-genai", "bedrock", "fal"], git = "https://github.com/Pipelex/pipelex.git?branch=pre-release%2Fv0.18.0b3" }, { name = "pyright", marker = "extra == 'dev'", specifier = ">=1.1.405" }, { name = "pytest", marker = "extra == 'dev'", specifier = ">=9.0.1" }, { name = "pytest-asyncio", marker = "extra == 'dev'", specifier = ">=0.24.0" }, @@ -1762,32 +1767,32 @@ wheels = [ [[package]] name = "opentelemetry-api" -version = "1.39.1" +version = "1.38.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "importlib-metadata" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/97/b9/3161be15bb8e3ad01be8be5a968a9237c3027c5be504362ff800fca3e442/opentelemetry_api-1.39.1.tar.gz", hash = "sha256:fbde8c80e1b937a2c61f20347e91c0c18a1940cecf012d62e65a7caf08967c9c", size = 65767, upload-time = "2025-12-11T13:32:39.182Z" } +sdist = { url = "https://files.pythonhosted.org/packages/08/d8/0f354c375628e048bd0570645b310797299754730079853095bf000fba69/opentelemetry_api-1.38.0.tar.gz", hash = "sha256:f4c193b5e8acb0912b06ac5b16321908dd0843d75049c091487322284a3eea12", size = 65242, upload-time = "2025-10-16T08:35:50.25Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/cf/df/d3f1ddf4bb4cb50ed9b1139cc7b1c54c34a1e7ce8fd1b9a37c0d1551a6bd/opentelemetry_api-1.39.1-py3-none-any.whl", hash = "sha256:2edd8463432a7f8443edce90972169b195e7d6a05500cd29e6d13898187c9950", size = 66356, upload-time = "2025-12-11T13:32:17.304Z" }, + { url = 
"https://files.pythonhosted.org/packages/ae/a2/d86e01c28300bd41bab8f18afd613676e2bd63515417b77636fc1add426f/opentelemetry_api-1.38.0-py3-none-any.whl", hash = "sha256:2891b0197f47124454ab9f0cf58f3be33faca394457ac3e09daba13ff50aa582", size = 65947, upload-time = "2025-10-16T08:35:30.23Z" }, ] [[package]] name = "opentelemetry-exporter-otlp-proto-common" -version = "1.39.1" +version = "1.38.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "opentelemetry-proto" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e9/9d/22d241b66f7bbde88a3bfa6847a351d2c46b84de23e71222c6aae25c7050/opentelemetry_exporter_otlp_proto_common-1.39.1.tar.gz", hash = "sha256:763370d4737a59741c89a67b50f9e39271639ee4afc999dadfe768541c027464", size = 20409, upload-time = "2025-12-11T13:32:40.885Z" } +sdist = { url = "https://files.pythonhosted.org/packages/19/83/dd4660f2956ff88ed071e9e0e36e830df14b8c5dc06722dbde1841accbe8/opentelemetry_exporter_otlp_proto_common-1.38.0.tar.gz", hash = "sha256:e333278afab4695aa8114eeb7bf4e44e65c6607d54968271a249c180b2cb605c", size = 20431, upload-time = "2025-10-16T08:35:53.285Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8c/02/ffc3e143d89a27ac21fd557365b98bd0653b98de8a101151d5805b5d4c33/opentelemetry_exporter_otlp_proto_common-1.39.1-py3-none-any.whl", hash = "sha256:08f8a5862d64cc3435105686d0216c1365dc5701f86844a8cd56597d0c764fde", size = 18366, upload-time = "2025-12-11T13:32:20.2Z" }, + { url = "https://files.pythonhosted.org/packages/a7/9e/55a41c9601191e8cd8eb626b54ee6827b9c9d4a46d736f32abc80d8039fc/opentelemetry_exporter_otlp_proto_common-1.38.0-py3-none-any.whl", hash = "sha256:03cb76ab213300fe4f4c62b7d8f17d97fcfd21b89f0b5ce38ea156327ddda74a", size = 18359, upload-time = "2025-10-16T08:35:34.099Z" }, ] [[package]] name = "opentelemetry-exporter-otlp-proto-http" -version = "1.39.1" +version = "1.38.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "googleapis-common-protos" }, @@ -1798,48 +1803,48 @@ dependencies = [ { name = "requests" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/80/04/2a08fa9c0214ae38880df01e8bfae12b067ec0793446578575e5080d6545/opentelemetry_exporter_otlp_proto_http-1.39.1.tar.gz", hash = "sha256:31bdab9745c709ce90a49a0624c2bd445d31a28ba34275951a6a362d16a0b9cb", size = 17288, upload-time = "2025-12-11T13:32:42.029Z" } +sdist = { url = "https://files.pythonhosted.org/packages/81/0a/debcdfb029fbd1ccd1563f7c287b89a6f7bef3b2902ade56797bfd020854/opentelemetry_exporter_otlp_proto_http-1.38.0.tar.gz", hash = "sha256:f16bd44baf15cbe07633c5112ffc68229d0edbeac7b37610be0b2def4e21e90b", size = 17282, upload-time = "2025-10-16T08:35:54.422Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/95/f1/b27d3e2e003cd9a3592c43d099d2ed8d0a947c15281bf8463a256db0b46c/opentelemetry_exporter_otlp_proto_http-1.39.1-py3-none-any.whl", hash = "sha256:d9f5207183dd752a412c4cd564ca8875ececba13be6e9c6c370ffb752fd59985", size = 19641, upload-time = "2025-12-11T13:32:22.248Z" }, + { url = "https://files.pythonhosted.org/packages/e5/77/154004c99fb9f291f74aa0822a2f5bbf565a72d8126b3a1b63ed8e5f83c7/opentelemetry_exporter_otlp_proto_http-1.38.0-py3-none-any.whl", hash = "sha256:84b937305edfc563f08ec69b9cb2298be8188371217e867c1854d77198d0825b", size = 19579, upload-time = "2025-10-16T08:35:36.269Z" }, ] [[package]] name = "opentelemetry-proto" -version = "1.39.1" +version = "1.38.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { 
name = "protobuf" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/49/1d/f25d76d8260c156c40c97c9ed4511ec0f9ce353f8108ca6e7561f82a06b2/opentelemetry_proto-1.39.1.tar.gz", hash = "sha256:6c8e05144fc0d3ed4d22c2289c6b126e03bcd0e6a7da0f16cedd2e1c2772e2c8", size = 46152, upload-time = "2025-12-11T13:32:48.681Z" } +sdist = { url = "https://files.pythonhosted.org/packages/51/14/f0c4f0f6371b9cb7f9fa9ee8918bfd59ac7040c7791f1e6da32a1839780d/opentelemetry_proto-1.38.0.tar.gz", hash = "sha256:88b161e89d9d372ce723da289b7da74c3a8354a8e5359992be813942969ed468", size = 46152, upload-time = "2025-10-16T08:36:01.612Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/51/95/b40c96a7b5203005a0b03d8ce8cd212ff23f1793d5ba289c87a097571b18/opentelemetry_proto-1.39.1-py3-none-any.whl", hash = "sha256:22cdc78efd3b3765d09e68bfbd010d4fc254c9818afd0b6b423387d9dee46007", size = 72535, upload-time = "2025-12-11T13:32:33.866Z" }, + { url = "https://files.pythonhosted.org/packages/b6/6a/82b68b14efca5150b2632f3692d627afa76b77378c4999f2648979409528/opentelemetry_proto-1.38.0-py3-none-any.whl", hash = "sha256:b6ebe54d3217c42e45462e2a1ae28c3e2bf2ec5a5645236a490f55f45f1a0a18", size = 72535, upload-time = "2025-10-16T08:35:45.749Z" }, ] [[package]] name = "opentelemetry-sdk" -version = "1.39.1" +version = "1.38.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "opentelemetry-api" }, { name = "opentelemetry-semantic-conventions" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/eb/fb/c76080c9ba07e1e8235d24cdcc4d125ef7aa3edf23eb4e497c2e50889adc/opentelemetry_sdk-1.39.1.tar.gz", hash = "sha256:cf4d4563caf7bff906c9f7967e2be22d0d6b349b908be0d90fb21c8e9c995cc6", size = 171460, upload-time = "2025-12-11T13:32:49.369Z" } +sdist = { url = "https://files.pythonhosted.org/packages/85/cb/f0eee1445161faf4c9af3ba7b848cc22a50a3d3e2515051ad8628c35ff80/opentelemetry_sdk-1.38.0.tar.gz", hash = "sha256:93df5d4d871ed09cb4272305be4d996236eedb232253e3ab864c8620f051cebe", size = 171942, upload-time = "2025-10-16T08:36:02.257Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7c/98/e91cf858f203d86f4eccdf763dcf01cf03f1dae80c3750f7e635bfa206b6/opentelemetry_sdk-1.39.1-py3-none-any.whl", hash = "sha256:4d5482c478513ecb0a5d938dcc61394e647066e0cc2676bee9f3af3f3f45f01c", size = 132565, upload-time = "2025-12-11T13:32:35.069Z" }, + { url = "https://files.pythonhosted.org/packages/2f/2e/e93777a95d7d9c40d270a371392b6d6f1ff170c2a3cb32d6176741b5b723/opentelemetry_sdk-1.38.0-py3-none-any.whl", hash = "sha256:1c66af6564ecc1553d72d811a01df063ff097cdc82ce188da9951f93b8d10f6b", size = 132349, upload-time = "2025-10-16T08:35:46.995Z" }, ] [[package]] name = "opentelemetry-semantic-conventions" -version = "0.60b1" +version = "0.59b0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "opentelemetry-api" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/91/df/553f93ed38bf22f4b999d9be9c185adb558982214f33eae539d3b5cd0858/opentelemetry_semantic_conventions-0.60b1.tar.gz", hash = "sha256:87c228b5a0669b748c76d76df6c364c369c28f1c465e50f661e39737e84bc953", size = 137935, upload-time = "2025-12-11T13:32:50.487Z" } +sdist = { url = "https://files.pythonhosted.org/packages/40/bc/8b9ad3802cd8ac6583a4eb7de7e5d7db004e89cb7efe7008f9c8a537ee75/opentelemetry_semantic_conventions-0.59b0.tar.gz", hash = "sha256:7a6db3f30d70202d5bf9fa4b69bc866ca6a30437287de6c510fb594878aed6b0", size = 129861, upload-time = 
"2025-10-16T08:36:03.346Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7a/5e/5958555e09635d09b75de3c4f8b9cae7335ca545d77392ffe7331534c402/opentelemetry_semantic_conventions-0.60b1-py3-none-any.whl", hash = "sha256:9fa8c8b0c110da289809292b0591220d3a7b53c1526a23021e977d68597893fb", size = 219982, upload-time = "2025-12-11T13:32:36.955Z" }, + { url = "https://files.pythonhosted.org/packages/24/7d/c88d7b15ba8fe5c6b8f93be50fc11795e9fc05386c44afaf6b76fe191f9b/opentelemetry_semantic_conventions-0.59b0-py3-none-any.whl", hash = "sha256:35d3b8833ef97d614136e253c1da9342b4c3c083bbaf29ce31d572a1c3825eed", size = 207954, upload-time = "2025-10-16T08:35:48.054Z" }, ] [[package]] @@ -1960,8 +1965,8 @@ wheels = [ [[package]] name = "pipelex" -version = "0.18.0b2" -source = { git = "https://github.com/Pipelex/pipelex.git?branch=pre-release%2Fv0.18.0b2#06b33904bd697791e058c7bc5b43454d96839f46" } +version = "0.18.0b3" +source = { git = "https://github.com/Pipelex/pipelex.git?branch=pre-release%2Fv0.18.0b3#b6095405505fba33b9b99294e3025d769f3af7c1" } dependencies = [ { name = "aiofiles" }, { name = "backports-strenum", marker = "python_full_version < '3.11'" }, @@ -2396,22 +2401,22 @@ wheels = [ [[package]] name = "pypdfium2" -version = "5.1.0" +version = "4.30.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/1c/87/56782107fa242137b77ccddc30519bbb33e7a9eed9da9649d9db45db2c64/pypdfium2-5.1.0.tar.gz", hash = "sha256:46335ca30a1584b804a6824da84d2e846b4b954bdfc342d035b7bf15ed9a14e5", size = 270104, upload-time = "2025-11-23T13:36:52.589Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/14/838b3ba247a0ba92e4df5d23f2bea9478edcfd72b78a39d6ca36ccd84ad2/pypdfium2-4.30.0.tar.gz", hash = "sha256:48b5b7e5566665bc1015b9d69c1ebabe21f6aee468b509531c3c8318eeee2e16", size = 140239, upload-time = "2024-05-09T18:33:17.552Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/1a/d7/46ce255322cd29f0db3772667a0da3db8ed137e1e9b9aa306ac5691765b3/pypdfium2-5.1.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:f3dde94d320d582d3c20255b600f1e7e03261bfdea139b7064b54126fc3db4e2", size = 2817789, upload-time = "2025-11-23T13:36:31.423Z" }, - { url = "https://files.pythonhosted.org/packages/19/a5/4ad3c1b336fdc2b7a88d835c56bcd64ce60d4a95d1a9eaafc44f853da582/pypdfium2-5.1.0-py3-none-macosx_11_0_x86_64.whl", hash = "sha256:dee09b7a3ab1860a17decc97c179a5aaba5a74b2780d53c91daa18d742945892", size = 2940861, upload-time = "2025-11-23T13:36:33.519Z" }, - { url = "https://files.pythonhosted.org/packages/19/93/d13ca66d5e075d7e27736c51c15955cdd3266ac0a8327613c3c520d43693/pypdfium2-5.1.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1757d6470cbf5b8d1c825350df2ccd79fd0bfcf5753ff566fd02153a486014b1", size = 2980933, upload-time = "2025-11-23T13:36:35.283Z" }, - { url = "https://files.pythonhosted.org/packages/a2/7c/02744ef9e0363af08f9ed47c0e603ef8713e02d4a48492c76d5bf36f65c3/pypdfium2-5.1.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ad18e95497423f88b33f2976cb78c27f0bd6ef4b4bf340c901f5f28a234c4f06", size = 2762960, upload-time = "2025-11-23T13:36:37.033Z" }, - { url = "https://files.pythonhosted.org/packages/89/26/f0abcfccb99b0a5c4451b70b0e72ccb7c27387931af01eae982870272202/pypdfium2-5.1.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2faee2f4fbd5bd33dd77c07d15ccaa6687562d883a54c4beb8329ebaee615b7d", size = 3060522, upload-time = "2025-11-23T13:36:38.835Z" }, - { url = 
"https://files.pythonhosted.org/packages/2f/74/92f508e71178aa85de32454762f84d6f9cef35c468caab3e0f1041dae464/pypdfium2-5.1.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d688372df169a9dad606c1e5ad34b6e0e6b820f1e0d540b4780711600a7bf8dd", size = 2995178, upload-time = "2025-11-23T13:36:40.319Z" }, - { url = "https://files.pythonhosted.org/packages/94/9f/91ca099ea64b24e19ef05da72e33d0ef0840e104d89cbdcb618da12629b5/pypdfium2-5.1.0-py3-none-musllinux_1_1_aarch64.whl", hash = "sha256:cfecd2b20f1c05027aaa2af6bfbcc2835b4c8f6455155b0dc2800ec6a2051965", size = 6321704, upload-time = "2025-11-23T13:36:42.177Z" }, - { url = "https://files.pythonhosted.org/packages/e0/4b/5628cfda9f534b3acc1e2cf50f9e9582cd9cfd86cf2ce718da229de6e709/pypdfium2-5.1.0-py3-none-musllinux_1_1_i686.whl", hash = "sha256:5698de8e6d662f1b2cdff5cb62e6f0ee79ffaaa13e282251854cbc64cf712449", size = 6329892, upload-time = "2025-11-23T13:36:43.757Z" }, - { url = "https://files.pythonhosted.org/packages/c5/25/5d2db765f8f82129d75ea2883ed26af3d1a64d8daaa20a11005ac681e2c3/pypdfium2-5.1.0-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:2cbd73093fbb1710ea1164cdf27583363e1b663b8cc22d555c84af0ee1af50c7", size = 6409889, upload-time = "2025-11-23T13:36:45.387Z" }, - { url = "https://files.pythonhosted.org/packages/89/d3/135ed8ca46044cd5005cd104ead13bea417777afa65d7af5a710eb68d340/pypdfium2-5.1.0-py3-none-win32.whl", hash = "sha256:11d319cd2e5f71cdc3d68e8a79142b559a0edbcc16fe31d4036fcfc45f0e9ed8", size = 2991546, upload-time = "2025-11-23T13:36:47.373Z" }, - { url = "https://files.pythonhosted.org/packages/52/8f/884a1b2fd7c747a98e9b4c95097c08b39d042a88837ac72f2945a7f6162c/pypdfium2-5.1.0-py3-none-win_amd64.whl", hash = "sha256:4725f347a8c9ff011a7035d8267ee25912ab1b946034ba0b57f3cca89de8847a", size = 3100176, upload-time = "2025-11-23T13:36:49.234Z" }, - { url = "https://files.pythonhosted.org/packages/d7/5c/72448636ea0ccd44878f77bb5d59a2c967a54eec806ee2e0d894ef0d2434/pypdfium2-5.1.0-py3-none-win_arm64.whl", hash = "sha256:47c5593f7eb6ae0f1e5a940d712d733ede580f09ca91de6c3f89611848695c0f", size = 2941500, upload-time = "2025-11-23T13:36:50.69Z" }, + { url = "https://files.pythonhosted.org/packages/c7/9a/c8ff5cc352c1b60b0b97642ae734f51edbab6e28b45b4fcdfe5306ee3c83/pypdfium2-4.30.0-py3-none-macosx_10_13_x86_64.whl", hash = "sha256:b33ceded0b6ff5b2b93bc1fe0ad4b71aa6b7e7bd5875f1ca0cdfb6ba6ac01aab", size = 2837254, upload-time = "2024-05-09T18:32:48.653Z" }, + { url = "https://files.pythonhosted.org/packages/21/8b/27d4d5409f3c76b985f4ee4afe147b606594411e15ac4dc1c3363c9a9810/pypdfium2-4.30.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:4e55689f4b06e2d2406203e771f78789bd4f190731b5d57383d05cf611d829de", size = 2707624, upload-time = "2024-05-09T18:32:51.458Z" }, + { url = "https://files.pythonhosted.org/packages/11/63/28a73ca17c24b41a205d658e177d68e198d7dde65a8c99c821d231b6ee3d/pypdfium2-4.30.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e6e50f5ce7f65a40a33d7c9edc39f23140c57e37144c2d6d9e9262a2a854854", size = 2793126, upload-time = "2024-05-09T18:32:53.581Z" }, + { url = "https://files.pythonhosted.org/packages/d1/96/53b3ebf0955edbd02ac6da16a818ecc65c939e98fdeb4e0958362bd385c8/pypdfium2-4.30.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3d0dd3ecaffd0b6dbda3da663220e705cb563918249bda26058c6036752ba3a2", size = 2591077, upload-time = "2024-05-09T18:32:55.99Z" }, + { url = 
"https://files.pythonhosted.org/packages/ec/ee/0394e56e7cab8b5b21f744d988400948ef71a9a892cbeb0b200d324ab2c7/pypdfium2-4.30.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cc3bf29b0db8c76cdfaac1ec1cde8edf211a7de7390fbf8934ad2aa9b4d6dfad", size = 2864431, upload-time = "2024-05-09T18:32:57.911Z" }, + { url = "https://files.pythonhosted.org/packages/65/cd/3f1edf20a0ef4a212a5e20a5900e64942c5a374473671ac0780eaa08ea80/pypdfium2-4.30.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1f78d2189e0ddf9ac2b7a9b9bd4f0c66f54d1389ff6c17e9fd9dc034d06eb3f", size = 2812008, upload-time = "2024-05-09T18:32:59.886Z" }, + { url = "https://files.pythonhosted.org/packages/c8/91/2d517db61845698f41a2a974de90762e50faeb529201c6b3574935969045/pypdfium2-4.30.0-py3-none-musllinux_1_1_aarch64.whl", hash = "sha256:5eda3641a2da7a7a0b2f4dbd71d706401a656fea521b6b6faa0675b15d31a163", size = 6181543, upload-time = "2024-05-09T18:33:02.597Z" }, + { url = "https://files.pythonhosted.org/packages/ba/c4/ed1315143a7a84b2c7616569dfb472473968d628f17c231c39e29ae9d780/pypdfium2-4.30.0-py3-none-musllinux_1_1_i686.whl", hash = "sha256:0dfa61421b5eb68e1188b0b2231e7ba35735aef2d867d86e48ee6cab6975195e", size = 6175911, upload-time = "2024-05-09T18:33:05.376Z" }, + { url = "https://files.pythonhosted.org/packages/7a/c4/9e62d03f414e0e3051c56d5943c3bf42aa9608ede4e19dc96438364e9e03/pypdfium2-4.30.0-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:f33bd79e7a09d5f7acca3b0b69ff6c8a488869a7fab48fdf400fec6e20b9c8be", size = 6267430, upload-time = "2024-05-09T18:33:08.067Z" }, + { url = "https://files.pythonhosted.org/packages/90/47/eda4904f715fb98561e34012826e883816945934a851745570521ec89520/pypdfium2-4.30.0-py3-none-win32.whl", hash = "sha256:ee2410f15d576d976c2ab2558c93d392a25fb9f6635e8dd0a8a3a5241b275e0e", size = 2775951, upload-time = "2024-05-09T18:33:10.567Z" }, + { url = "https://files.pythonhosted.org/packages/25/bd/56d9ec6b9f0fc4e0d95288759f3179f0fcd34b1a1526b75673d2f6d5196f/pypdfium2-4.30.0-py3-none-win_amd64.whl", hash = "sha256:90dbb2ac07be53219f56be09961eb95cf2473f834d01a42d901d13ccfad64b4c", size = 2892098, upload-time = "2024-05-09T18:33:13.107Z" }, + { url = "https://files.pythonhosted.org/packages/be/7a/097801205b991bc3115e8af1edb850d30aeaf0118520b016354cf5ccd3f6/pypdfium2-4.30.0-py3-none-win_arm64.whl", hash = "sha256:119b2969a6d6b1e8d55e99caaf05290294f2d0fe49c12a3f17102d01c441bd29", size = 2752118, upload-time = "2024-05-09T18:33:15.489Z" }, ] [[package]] @@ -2956,19 +2961,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, ] -[[package]] -name = "typing-inspect" -version = "0.9.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "mypy-extensions" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/dc/74/1789779d91f1961fa9438e9a8710cdae6bd138c80d7303996933d117264a/typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78", size = 13825, upload-time = "2023-05-24T20:25:47.612Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/65/f3/107a22063bf27bdccf2024833d3445f4eea42b2e598abfbd46f6a63b6cb0/typing_inspect-0.9.0-py3-none-any.whl", hash = 
"sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f", size = 8827, upload-time = "2023-05-24T20:25:45.287Z" }, -] - [[package]] name = "typing-inspection" version = "0.4.2"