From 021bd5e6138574884befe6f20ba86ceeefee1767 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 27 Mar 2026 23:52:21 +0000
Subject: [PATCH 01/26] refactor: remove fine_tuning API
---
.stats.yml | 8 +-
api.md | 36 --
.../resources/alpha/__init__.py | 14 -
.../resources/alpha/alpha.py | 32 --
.../resources/alpha/post_training/__init__.py | 39 --
.../resources/alpha/post_training/job.py | 402 ------------------
.../alpha/post_training/post_training.py | 395 -----------------
.../resources/beta/datasets.py | 4 +-
.../types/alpha/__init__.py | 8 -
.../alpha/list_post_training_jobs_response.py | 16 -
.../types/alpha/post_training/__init__.py | 13 -
.../post_training/job_artifacts_response.py | 51 ---
.../alpha/post_training/job_list_response.py | 16 -
.../post_training/job_status_response.py | 63 ---
.../types/alpha/post_training_job.py | 15 -
...ost_training_preference_optimize_params.py | 116 -----
...st_training_supervised_fine_tune_params.py | 148 -------
.../types/beta/dataset_list_response.py | 2 +-
.../types/beta/dataset_register_params.py | 2 +-
.../types/beta/dataset_register_response.py | 2 +-
.../types/beta/dataset_retrieve_response.py | 2 +-
.../alpha/post_training/__init__.py | 7 -
.../alpha/post_training/test_job.py | 308 --------------
.../api_resources/alpha/test_post_training.py | 386 -----------------
tests/api_resources/beta/test_datasets.py | 16 +-
25 files changed, 18 insertions(+), 2083 deletions(-)
delete mode 100644 src/llama_stack_client/resources/alpha/post_training/__init__.py
delete mode 100644 src/llama_stack_client/resources/alpha/post_training/job.py
delete mode 100644 src/llama_stack_client/resources/alpha/post_training/post_training.py
delete mode 100644 src/llama_stack_client/types/alpha/list_post_training_jobs_response.py
delete mode 100644 src/llama_stack_client/types/alpha/post_training/__init__.py
delete mode 100644 src/llama_stack_client/types/alpha/post_training/job_artifacts_response.py
delete mode 100644 src/llama_stack_client/types/alpha/post_training/job_list_response.py
delete mode 100644 src/llama_stack_client/types/alpha/post_training/job_status_response.py
delete mode 100644 src/llama_stack_client/types/alpha/post_training_job.py
delete mode 100644 src/llama_stack_client/types/alpha/post_training_preference_optimize_params.py
delete mode 100644 src/llama_stack_client/types/alpha/post_training_supervised_fine_tune_params.py
delete mode 100644 tests/api_resources/alpha/post_training/__init__.py
delete mode 100644 tests/api_resources/alpha/post_training/test_job.py
delete mode 100644 tests/api_resources/alpha/test_post_training.py
diff --git a/.stats.yml b/.stats.yml
index ed6389b0..5503e9a9 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 108
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-1b387ba7b0e0d1aa931032ac2101e5a473b9fa42975e6575cf889feace342b80.yml
-openapi_spec_hash: a144868005520bd3f8f9dc3d8cac1c22
-config_hash: ef1f9b33e203c71cfc10d91890c1ed2d
+configured_endpoints: 102
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-60d07d12e08e84edf3d1d8665182f45548d840821600ee994b7e4f32c40c1d49.yml
+openapi_spec_hash: 375be93b0b1348b7dac1a4749f806870
+config_hash: d1db834ac019b3bb2e260c9e37dcb5d1
diff --git a/api.md b/api.md
index e4ada90b..713b81ec 100644
--- a/api.md
+++ b/api.md
@@ -442,42 +442,6 @@ Methods:
# Alpha
-## PostTraining
-
-Types:
-
-```python
-from llama_stack_client.types.alpha import (
- AlgorithmConfig,
- ListPostTrainingJobsResponse,
- PostTrainingJob,
-)
-```
-
-Methods:
-
-- client.alpha.post_training.preference_optimize(\*\*params) -> PostTrainingJob
-- client.alpha.post_training.supervised_fine_tune(\*\*params) -> PostTrainingJob
-
-### Job
-
-Types:
-
-```python
-from llama_stack_client.types.alpha.post_training import (
- JobListResponse,
- JobArtifactsResponse,
- JobStatusResponse,
-)
-```
-
-Methods:
-
-- client.alpha.post_training.job.list() -> JobListResponse
-- client.alpha.post_training.job.artifacts(job_uuid) -> JobArtifactsResponse
-- client.alpha.post_training.job.cancel(job_uuid) -> None
-- client.alpha.post_training.job.status(job_uuid) -> JobStatusResponse
-
## Benchmarks
Types:
diff --git a/src/llama_stack_client/resources/alpha/__init__.py b/src/llama_stack_client/resources/alpha/__init__.py
index 6de437ae..681e5bb0 100644
--- a/src/llama_stack_client/resources/alpha/__init__.py
+++ b/src/llama_stack_client/resources/alpha/__init__.py
@@ -46,22 +46,8 @@
BenchmarksResourceWithStreamingResponse,
AsyncBenchmarksResourceWithStreamingResponse,
)
-from .post_training import (
- PostTrainingResource,
- AsyncPostTrainingResource,
- PostTrainingResourceWithRawResponse,
- AsyncPostTrainingResourceWithRawResponse,
- PostTrainingResourceWithStreamingResponse,
- AsyncPostTrainingResourceWithStreamingResponse,
-)
__all__ = [
- "PostTrainingResource",
- "AsyncPostTrainingResource",
- "PostTrainingResourceWithRawResponse",
- "AsyncPostTrainingResourceWithRawResponse",
- "PostTrainingResourceWithStreamingResponse",
- "AsyncPostTrainingResourceWithStreamingResponse",
"BenchmarksResource",
"AsyncBenchmarksResource",
"BenchmarksResourceWithRawResponse",
diff --git a/src/llama_stack_client/resources/alpha/alpha.py b/src/llama_stack_client/resources/alpha/alpha.py
index eac3360a..3c471fc4 100644
--- a/src/llama_stack_client/resources/alpha/alpha.py
+++ b/src/llama_stack_client/resources/alpha/alpha.py
@@ -42,23 +42,11 @@
AsyncBenchmarksResourceWithStreamingResponse,
)
from ..._resource import SyncAPIResource, AsyncAPIResource
-from .post_training.post_training import (
- PostTrainingResource,
- AsyncPostTrainingResource,
- PostTrainingResourceWithRawResponse,
- AsyncPostTrainingResourceWithRawResponse,
- PostTrainingResourceWithStreamingResponse,
- AsyncPostTrainingResourceWithStreamingResponse,
-)
__all__ = ["AlphaResource", "AsyncAlphaResource"]
class AlphaResource(SyncAPIResource):
- @cached_property
- def post_training(self) -> PostTrainingResource:
- return PostTrainingResource(self._client)
-
@cached_property
def benchmarks(self) -> BenchmarksResource:
return BenchmarksResource(self._client)
@@ -107,10 +95,6 @@ def with_streaming_response(self) -> AlphaResourceWithStreamingResponse:
class AsyncAlphaResource(AsyncAPIResource):
- @cached_property
- def post_training(self) -> AsyncPostTrainingResource:
- return AsyncPostTrainingResource(self._client)
-
@cached_property
def benchmarks(self) -> AsyncBenchmarksResource:
return AsyncBenchmarksResource(self._client)
@@ -162,10 +146,6 @@ class AlphaResourceWithRawResponse:
def __init__(self, alpha: AlphaResource) -> None:
self._alpha = alpha
- @cached_property
- def post_training(self) -> PostTrainingResourceWithRawResponse:
- return PostTrainingResourceWithRawResponse(self._alpha.post_training)
-
@cached_property
def benchmarks(self) -> BenchmarksResourceWithRawResponse:
return BenchmarksResourceWithRawResponse(self._alpha.benchmarks)
@@ -198,10 +178,6 @@ class AsyncAlphaResourceWithRawResponse:
def __init__(self, alpha: AsyncAlphaResource) -> None:
self._alpha = alpha
- @cached_property
- def post_training(self) -> AsyncPostTrainingResourceWithRawResponse:
- return AsyncPostTrainingResourceWithRawResponse(self._alpha.post_training)
-
@cached_property
def benchmarks(self) -> AsyncBenchmarksResourceWithRawResponse:
return AsyncBenchmarksResourceWithRawResponse(self._alpha.benchmarks)
@@ -234,10 +210,6 @@ class AlphaResourceWithStreamingResponse:
def __init__(self, alpha: AlphaResource) -> None:
self._alpha = alpha
- @cached_property
- def post_training(self) -> PostTrainingResourceWithStreamingResponse:
- return PostTrainingResourceWithStreamingResponse(self._alpha.post_training)
-
@cached_property
def benchmarks(self) -> BenchmarksResourceWithStreamingResponse:
return BenchmarksResourceWithStreamingResponse(self._alpha.benchmarks)
@@ -270,10 +242,6 @@ class AsyncAlphaResourceWithStreamingResponse:
def __init__(self, alpha: AsyncAlphaResource) -> None:
self._alpha = alpha
- @cached_property
- def post_training(self) -> AsyncPostTrainingResourceWithStreamingResponse:
- return AsyncPostTrainingResourceWithStreamingResponse(self._alpha.post_training)
-
@cached_property
def benchmarks(self) -> AsyncBenchmarksResourceWithStreamingResponse:
return AsyncBenchmarksResourceWithStreamingResponse(self._alpha.benchmarks)
diff --git a/src/llama_stack_client/resources/alpha/post_training/__init__.py b/src/llama_stack_client/resources/alpha/post_training/__init__.py
deleted file mode 100644
index 81a6a807..00000000
--- a/src/llama_stack_client/resources/alpha/post_training/__init__.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .job import (
- JobResource,
- AsyncJobResource,
- JobResourceWithRawResponse,
- AsyncJobResourceWithRawResponse,
- JobResourceWithStreamingResponse,
- AsyncJobResourceWithStreamingResponse,
-)
-from .post_training import (
- PostTrainingResource,
- AsyncPostTrainingResource,
- PostTrainingResourceWithRawResponse,
- AsyncPostTrainingResourceWithRawResponse,
- PostTrainingResourceWithStreamingResponse,
- AsyncPostTrainingResourceWithStreamingResponse,
-)
-
-__all__ = [
- "JobResource",
- "AsyncJobResource",
- "JobResourceWithRawResponse",
- "AsyncJobResourceWithRawResponse",
- "JobResourceWithStreamingResponse",
- "AsyncJobResourceWithStreamingResponse",
- "PostTrainingResource",
- "AsyncPostTrainingResource",
- "PostTrainingResourceWithRawResponse",
- "AsyncPostTrainingResourceWithRawResponse",
- "PostTrainingResourceWithStreamingResponse",
- "AsyncPostTrainingResourceWithStreamingResponse",
-]
diff --git a/src/llama_stack_client/resources/alpha/post_training/job.py b/src/llama_stack_client/resources/alpha/post_training/job.py
deleted file mode 100644
index cf949615..00000000
--- a/src/llama_stack_client/resources/alpha/post_training/job.py
+++ /dev/null
@@ -1,402 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Type, cast
-
-import httpx
-
-from ...._types import Body, Query, Headers, NoneType, NotGiven, not_given
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._wrappers import DataWrapper
-from ...._base_client import make_request_options
-from ....types.alpha.post_training.job_list_response import JobListResponse
-from ....types.alpha.post_training.job_status_response import JobStatusResponse
-from ....types.alpha.post_training.job_artifacts_response import JobArtifactsResponse
-
-__all__ = ["JobResource", "AsyncJobResource"]
-
-
-class JobResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> JobResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return JobResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> JobResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return JobResourceWithStreamingResponse(self)
-
- def list(
- self,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> JobListResponse:
- """Get all training jobs."""
- return self._get(
- "/v1alpha/post-training/jobs",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[JobListResponse]._unwrapper,
- ),
- cast_to=cast(Type[JobListResponse], DataWrapper[JobListResponse]),
- )
-
- def artifacts(
- self,
- job_uuid: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> JobArtifactsResponse:
- """
- Get the artifacts of a training job.
-
- Args:
- job_uuid: The UUID of the job to get the artifacts of.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not job_uuid:
- raise ValueError(f"Expected a non-empty value for `job_uuid` but received {job_uuid!r}")
- return self._get(
- f"/v1alpha/post-training/jobs/{job_uuid}/artifacts",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=JobArtifactsResponse,
- )
-
- def cancel(
- self,
- job_uuid: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Cancel a training job.
-
- Args:
- job_uuid: The UUID of the job to cancel.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not job_uuid:
- raise ValueError(f"Expected a non-empty value for `job_uuid` but received {job_uuid!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return self._post(
- f"/v1alpha/post-training/jobs/{job_uuid}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
- def status(
- self,
- job_uuid: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> JobStatusResponse:
- """
- Get the status of a training job.
-
- Args:
- job_uuid: The UUID of the job to get the status of.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not job_uuid:
- raise ValueError(f"Expected a non-empty value for `job_uuid` but received {job_uuid!r}")
- return self._get(
- f"/v1alpha/post-training/jobs/{job_uuid}/status",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=JobStatusResponse,
- )
-
-
-class AsyncJobResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncJobResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return AsyncJobResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncJobResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return AsyncJobResourceWithStreamingResponse(self)
-
- async def list(
- self,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> JobListResponse:
- """Get all training jobs."""
- return await self._get(
- "/v1alpha/post-training/jobs",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[JobListResponse]._unwrapper,
- ),
- cast_to=cast(Type[JobListResponse], DataWrapper[JobListResponse]),
- )
-
- async def artifacts(
- self,
- job_uuid: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> JobArtifactsResponse:
- """
- Get the artifacts of a training job.
-
- Args:
- job_uuid: The UUID of the job to get the artifacts of.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not job_uuid:
- raise ValueError(f"Expected a non-empty value for `job_uuid` but received {job_uuid!r}")
- return await self._get(
- f"/v1alpha/post-training/jobs/{job_uuid}/artifacts",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=JobArtifactsResponse,
- )
-
- async def cancel(
- self,
- job_uuid: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Cancel a training job.
-
- Args:
- job_uuid: The UUID of the job to cancel.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not job_uuid:
- raise ValueError(f"Expected a non-empty value for `job_uuid` but received {job_uuid!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return await self._post(
- f"/v1alpha/post-training/jobs/{job_uuid}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
- async def status(
- self,
- job_uuid: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> JobStatusResponse:
- """
- Get the status of a training job.
-
- Args:
- job_uuid: The UUID of the job to get the status of.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not job_uuid:
- raise ValueError(f"Expected a non-empty value for `job_uuid` but received {job_uuid!r}")
- return await self._get(
- f"/v1alpha/post-training/jobs/{job_uuid}/status",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=JobStatusResponse,
- )
-
-
-class JobResourceWithRawResponse:
- def __init__(self, job: JobResource) -> None:
- self._job = job
-
- self.list = to_raw_response_wrapper(
- job.list,
- )
- self.artifacts = to_raw_response_wrapper(
- job.artifacts,
- )
- self.cancel = to_raw_response_wrapper(
- job.cancel,
- )
- self.status = to_raw_response_wrapper(
- job.status,
- )
-
-
-class AsyncJobResourceWithRawResponse:
- def __init__(self, job: AsyncJobResource) -> None:
- self._job = job
-
- self.list = async_to_raw_response_wrapper(
- job.list,
- )
- self.artifacts = async_to_raw_response_wrapper(
- job.artifacts,
- )
- self.cancel = async_to_raw_response_wrapper(
- job.cancel,
- )
- self.status = async_to_raw_response_wrapper(
- job.status,
- )
-
-
-class JobResourceWithStreamingResponse:
- def __init__(self, job: JobResource) -> None:
- self._job = job
-
- self.list = to_streamed_response_wrapper(
- job.list,
- )
- self.artifacts = to_streamed_response_wrapper(
- job.artifacts,
- )
- self.cancel = to_streamed_response_wrapper(
- job.cancel,
- )
- self.status = to_streamed_response_wrapper(
- job.status,
- )
-
-
-class AsyncJobResourceWithStreamingResponse:
- def __init__(self, job: AsyncJobResource) -> None:
- self._job = job
-
- self.list = async_to_streamed_response_wrapper(
- job.list,
- )
- self.artifacts = async_to_streamed_response_wrapper(
- job.artifacts,
- )
- self.cancel = async_to_streamed_response_wrapper(
- job.cancel,
- )
- self.status = async_to_streamed_response_wrapper(
- job.status,
- )
diff --git a/src/llama_stack_client/resources/alpha/post_training/post_training.py b/src/llama_stack_client/resources/alpha/post_training/post_training.py
deleted file mode 100644
index 0d054d86..00000000
--- a/src/llama_stack_client/resources/alpha/post_training/post_training.py
+++ /dev/null
@@ -1,395 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Optional
-
-import httpx
-
-from .job import (
- JobResource,
- AsyncJobResource,
- JobResourceWithRawResponse,
- AsyncJobResourceWithRawResponse,
- JobResourceWithStreamingResponse,
- AsyncJobResourceWithStreamingResponse,
-)
-from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ....types.alpha import post_training_preference_optimize_params, post_training_supervised_fine_tune_params
-from ...._base_client import make_request_options
-from ....types.alpha.post_training_job import PostTrainingJob
-
-__all__ = ["PostTrainingResource", "AsyncPostTrainingResource"]
-
-
-class PostTrainingResource(SyncAPIResource):
- @cached_property
- def job(self) -> JobResource:
- return JobResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> PostTrainingResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return PostTrainingResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> PostTrainingResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return PostTrainingResourceWithStreamingResponse(self)
-
- def preference_optimize(
- self,
- *,
- algorithm_config: post_training_preference_optimize_params.AlgorithmConfig,
- finetuned_model: str,
- hyperparam_search_config: Dict[str, object],
- job_uuid: str,
- logger_config: Dict[str, object],
- training_config: post_training_preference_optimize_params.TrainingConfig,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> PostTrainingJob:
- """
- Run preference optimization of a model.
-
- Args:
- algorithm_config: The algorithm configuration.
-
- finetuned_model: The model to fine-tune.
-
- hyperparam_search_config: The hyperparam search configuration.
-
- job_uuid: The UUID of the job to create.
-
- logger_config: The logger configuration.
-
- training_config: The training configuration.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/v1alpha/post-training/preference-optimize",
- body=maybe_transform(
- {
- "algorithm_config": algorithm_config,
- "finetuned_model": finetuned_model,
- "hyperparam_search_config": hyperparam_search_config,
- "job_uuid": job_uuid,
- "logger_config": logger_config,
- "training_config": training_config,
- },
- post_training_preference_optimize_params.PostTrainingPreferenceOptimizeParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=PostTrainingJob,
- )
-
- def supervised_fine_tune(
- self,
- *,
- hyperparam_search_config: Dict[str, object],
- job_uuid: str,
- logger_config: Dict[str, object],
- training_config: post_training_supervised_fine_tune_params.TrainingConfig,
- algorithm_config: Optional[post_training_supervised_fine_tune_params.AlgorithmConfig] | Omit = omit,
- checkpoint_dir: Optional[str] | Omit = omit,
- model: Optional[str] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> PostTrainingJob:
- """
- Run supervised fine-tuning of a model.
-
- Args:
- hyperparam_search_config: The hyperparam search configuration.
-
- job_uuid: The UUID of the job to create.
-
- logger_config: The logger configuration.
-
- training_config: The training configuration.
-
- algorithm_config: The algorithm configuration.
-
- checkpoint_dir: The directory to save checkpoint(s) to.
-
- model: Model descriptor for training if not in provider config
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/v1alpha/post-training/supervised-fine-tune",
- body=maybe_transform(
- {
- "hyperparam_search_config": hyperparam_search_config,
- "job_uuid": job_uuid,
- "logger_config": logger_config,
- "training_config": training_config,
- "algorithm_config": algorithm_config,
- "checkpoint_dir": checkpoint_dir,
- "model": model,
- },
- post_training_supervised_fine_tune_params.PostTrainingSupervisedFineTuneParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=PostTrainingJob,
- )
-
-
-class AsyncPostTrainingResource(AsyncAPIResource):
- @cached_property
- def job(self) -> AsyncJobResource:
- return AsyncJobResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncPostTrainingResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return AsyncPostTrainingResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncPostTrainingResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return AsyncPostTrainingResourceWithStreamingResponse(self)
-
- async def preference_optimize(
- self,
- *,
- algorithm_config: post_training_preference_optimize_params.AlgorithmConfig,
- finetuned_model: str,
- hyperparam_search_config: Dict[str, object],
- job_uuid: str,
- logger_config: Dict[str, object],
- training_config: post_training_preference_optimize_params.TrainingConfig,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> PostTrainingJob:
- """
- Run preference optimization of a model.
-
- Args:
- algorithm_config: The algorithm configuration.
-
- finetuned_model: The model to fine-tune.
-
- hyperparam_search_config: The hyperparam search configuration.
-
- job_uuid: The UUID of the job to create.
-
- logger_config: The logger configuration.
-
- training_config: The training configuration.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/v1alpha/post-training/preference-optimize",
- body=await async_maybe_transform(
- {
- "algorithm_config": algorithm_config,
- "finetuned_model": finetuned_model,
- "hyperparam_search_config": hyperparam_search_config,
- "job_uuid": job_uuid,
- "logger_config": logger_config,
- "training_config": training_config,
- },
- post_training_preference_optimize_params.PostTrainingPreferenceOptimizeParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=PostTrainingJob,
- )
-
- async def supervised_fine_tune(
- self,
- *,
- hyperparam_search_config: Dict[str, object],
- job_uuid: str,
- logger_config: Dict[str, object],
- training_config: post_training_supervised_fine_tune_params.TrainingConfig,
- algorithm_config: Optional[post_training_supervised_fine_tune_params.AlgorithmConfig] | Omit = omit,
- checkpoint_dir: Optional[str] | Omit = omit,
- model: Optional[str] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> PostTrainingJob:
- """
- Run supervised fine-tuning of a model.
-
- Args:
- hyperparam_search_config: The hyperparam search configuration.
-
- job_uuid: The UUID of the job to create.
-
- logger_config: The logger configuration.
-
- training_config: The training configuration.
-
- algorithm_config: The algorithm configuration.
-
- checkpoint_dir: The directory to save checkpoint(s) to.
-
- model: Model descriptor for training if not in provider config
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/v1alpha/post-training/supervised-fine-tune",
- body=await async_maybe_transform(
- {
- "hyperparam_search_config": hyperparam_search_config,
- "job_uuid": job_uuid,
- "logger_config": logger_config,
- "training_config": training_config,
- "algorithm_config": algorithm_config,
- "checkpoint_dir": checkpoint_dir,
- "model": model,
- },
- post_training_supervised_fine_tune_params.PostTrainingSupervisedFineTuneParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=PostTrainingJob,
- )
-
-
-class PostTrainingResourceWithRawResponse:
- def __init__(self, post_training: PostTrainingResource) -> None:
- self._post_training = post_training
-
- self.preference_optimize = to_raw_response_wrapper(
- post_training.preference_optimize,
- )
- self.supervised_fine_tune = to_raw_response_wrapper(
- post_training.supervised_fine_tune,
- )
-
- @cached_property
- def job(self) -> JobResourceWithRawResponse:
- return JobResourceWithRawResponse(self._post_training.job)
-
-
-class AsyncPostTrainingResourceWithRawResponse:
- def __init__(self, post_training: AsyncPostTrainingResource) -> None:
- self._post_training = post_training
-
- self.preference_optimize = async_to_raw_response_wrapper(
- post_training.preference_optimize,
- )
- self.supervised_fine_tune = async_to_raw_response_wrapper(
- post_training.supervised_fine_tune,
- )
-
- @cached_property
- def job(self) -> AsyncJobResourceWithRawResponse:
- return AsyncJobResourceWithRawResponse(self._post_training.job)
-
-
-class PostTrainingResourceWithStreamingResponse:
- def __init__(self, post_training: PostTrainingResource) -> None:
- self._post_training = post_training
-
- self.preference_optimize = to_streamed_response_wrapper(
- post_training.preference_optimize,
- )
- self.supervised_fine_tune = to_streamed_response_wrapper(
- post_training.supervised_fine_tune,
- )
-
- @cached_property
- def job(self) -> JobResourceWithStreamingResponse:
- return JobResourceWithStreamingResponse(self._post_training.job)
-
-
-class AsyncPostTrainingResourceWithStreamingResponse:
- def __init__(self, post_training: AsyncPostTrainingResource) -> None:
- self._post_training = post_training
-
- self.preference_optimize = async_to_streamed_response_wrapper(
- post_training.preference_optimize,
- )
- self.supervised_fine_tune = async_to_streamed_response_wrapper(
- post_training.supervised_fine_tune,
- )
-
- @cached_property
- def job(self) -> AsyncJobResourceWithStreamingResponse:
- return AsyncJobResourceWithStreamingResponse(self._post_training.job)
diff --git a/src/llama_stack_client/resources/beta/datasets.py b/src/llama_stack_client/resources/beta/datasets.py
index 59844f43..0dd1071c 100644
--- a/src/llama_stack_client/resources/beta/datasets.py
+++ b/src/llama_stack_client/resources/beta/datasets.py
@@ -227,7 +227,7 @@ def iterrows(
def register(
self,
*,
- purpose: Literal["post-training/messages", "eval/question-answer", "eval/messages-answer"],
+ purpose: Literal["eval/question-answer", "eval/messages-answer"],
source: dataset_register_params.Source,
dataset_id: Optional[str] | Omit = omit,
metadata: Optional[Dict[str, object]] | Omit = omit,
@@ -505,7 +505,7 @@ async def iterrows(
async def register(
self,
*,
- purpose: Literal["post-training/messages", "eval/question-answer", "eval/messages-answer"],
+ purpose: Literal["eval/question-answer", "eval/messages-answer"],
source: dataset_register_params.Source,
dataset_id: Optional[str] | Omit = omit,
metadata: Optional[Dict[str, object]] | Omit = omit,
diff --git a/src/llama_stack_client/types/alpha/__init__.py b/src/llama_stack_client/types/alpha/__init__.py
index f9423b58..a1d4e7ec 100644
--- a/src/llama_stack_client/types/alpha/__init__.py
+++ b/src/llama_stack_client/types/alpha/__init__.py
@@ -11,7 +11,6 @@
from .job import Job as Job
from .benchmark import Benchmark as Benchmark
from .evaluate_response import EvaluateResponse as EvaluateResponse
-from .post_training_job import PostTrainingJob as PostTrainingJob
from .eval_run_eval_params import EvalRunEvalParams as EvalRunEvalParams
from .benchmark_config_param import BenchmarkConfigParam as BenchmarkConfigParam
from .benchmark_list_response import BenchmarkListResponse as BenchmarkListResponse
@@ -23,10 +22,3 @@
from .inference_rerank_response import InferenceRerankResponse as InferenceRerankResponse
from .eval_run_eval_alpha_params import EvalRunEvalAlphaParams as EvalRunEvalAlphaParams
from .eval_evaluate_rows_alpha_params import EvalEvaluateRowsAlphaParams as EvalEvaluateRowsAlphaParams
-from .list_post_training_jobs_response import ListPostTrainingJobsResponse as ListPostTrainingJobsResponse
-from .post_training_preference_optimize_params import (
- PostTrainingPreferenceOptimizeParams as PostTrainingPreferenceOptimizeParams,
-)
-from .post_training_supervised_fine_tune_params import (
- PostTrainingSupervisedFineTuneParams as PostTrainingSupervisedFineTuneParams,
-)
diff --git a/src/llama_stack_client/types/alpha/list_post_training_jobs_response.py b/src/llama_stack_client/types/alpha/list_post_training_jobs_response.py
deleted file mode 100644
index 6c87bcd7..00000000
--- a/src/llama_stack_client/types/alpha/list_post_training_jobs_response.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from ..._models import BaseModel
-from .post_training.job_list_response import JobListResponse
-
-__all__ = ["ListPostTrainingJobsResponse"]
-
-
-class ListPostTrainingJobsResponse(BaseModel):
- data: JobListResponse
diff --git a/src/llama_stack_client/types/alpha/post_training/__init__.py b/src/llama_stack_client/types/alpha/post_training/__init__.py
deleted file mode 100644
index fd24ff14..00000000
--- a/src/llama_stack_client/types/alpha/post_training/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .job_list_response import JobListResponse as JobListResponse
-from .job_status_response import JobStatusResponse as JobStatusResponse
-from .job_artifacts_response import JobArtifactsResponse as JobArtifactsResponse
diff --git a/src/llama_stack_client/types/alpha/post_training/job_artifacts_response.py b/src/llama_stack_client/types/alpha/post_training/job_artifacts_response.py
deleted file mode 100644
index 235ec773..00000000
--- a/src/llama_stack_client/types/alpha/post_training/job_artifacts_response.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from datetime import datetime
-
-from ...._models import BaseModel
-
-__all__ = ["JobArtifactsResponse", "Checkpoint", "CheckpointTrainingMetrics"]
-
-
-class CheckpointTrainingMetrics(BaseModel):
- """Training metrics captured during post-training jobs."""
-
- epoch: int
-
- perplexity: float
-
- train_loss: float
-
- validation_loss: float
-
-
-class Checkpoint(BaseModel):
- """Checkpoint created during training runs."""
-
- created_at: datetime
-
- epoch: int
-
- identifier: str
-
- path: str
-
- post_training_job_id: str
-
- training_metrics: Optional[CheckpointTrainingMetrics] = None
- """Training metrics captured during post-training jobs."""
-
-
-class JobArtifactsResponse(BaseModel):
- """Artifacts of a finetuning job."""
-
- job_uuid: str
-
- checkpoints: Optional[List[Checkpoint]] = None
diff --git a/src/llama_stack_client/types/alpha/post_training/job_list_response.py b/src/llama_stack_client/types/alpha/post_training/job_list_response.py
deleted file mode 100644
index 2d47e18d..00000000
--- a/src/llama_stack_client/types/alpha/post_training/job_list_response.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import TypeAlias
-
-from ..post_training_job import PostTrainingJob
-
-__all__ = ["JobListResponse"]
-
-JobListResponse: TypeAlias = List[PostTrainingJob]
diff --git a/src/llama_stack_client/types/alpha/post_training/job_status_response.py b/src/llama_stack_client/types/alpha/post_training/job_status_response.py
deleted file mode 100644
index dbafbaa4..00000000
--- a/src/llama_stack_client/types/alpha/post_training/job_status_response.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Optional
-from datetime import datetime
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-
-__all__ = ["JobStatusResponse", "Checkpoint", "CheckpointTrainingMetrics"]
-
-
-class CheckpointTrainingMetrics(BaseModel):
- """Training metrics captured during post-training jobs."""
-
- epoch: int
-
- perplexity: float
-
- train_loss: float
-
- validation_loss: float
-
-
-class Checkpoint(BaseModel):
- """Checkpoint created during training runs."""
-
- created_at: datetime
-
- epoch: int
-
- identifier: str
-
- path: str
-
- post_training_job_id: str
-
- training_metrics: Optional[CheckpointTrainingMetrics] = None
- """Training metrics captured during post-training jobs."""
-
-
-class JobStatusResponse(BaseModel):
- """Status of a finetuning job."""
-
- job_uuid: str
-
- status: Literal["completed", "in_progress", "failed", "scheduled", "cancelled"]
- """Status of a job execution."""
-
- checkpoints: Optional[List[Checkpoint]] = None
-
- completed_at: Optional[datetime] = None
-
- resources_allocated: Optional[Dict[str, object]] = None
-
- scheduled_at: Optional[datetime] = None
-
- started_at: Optional[datetime] = None
diff --git a/src/llama_stack_client/types/alpha/post_training_job.py b/src/llama_stack_client/types/alpha/post_training_job.py
deleted file mode 100644
index 5d3a5391..00000000
--- a/src/llama_stack_client/types/alpha/post_training_job.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from ..._models import BaseModel
-
-__all__ = ["PostTrainingJob"]
-
-
-class PostTrainingJob(BaseModel):
- job_uuid: str
diff --git a/src/llama_stack_client/types/alpha/post_training_preference_optimize_params.py b/src/llama_stack_client/types/alpha/post_training_preference_optimize_params.py
deleted file mode 100644
index 39512687..00000000
--- a/src/llama_stack_client/types/alpha/post_training_preference_optimize_params.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Optional
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = [
- "PostTrainingPreferenceOptimizeParams",
- "AlgorithmConfig",
- "TrainingConfig",
- "TrainingConfigDataConfig",
- "TrainingConfigEfficiencyConfig",
- "TrainingConfigOptimizerConfig",
-]
-
-
-class PostTrainingPreferenceOptimizeParams(TypedDict, total=False):
- algorithm_config: Required[AlgorithmConfig]
- """The algorithm configuration."""
-
- finetuned_model: Required[str]
- """The model to fine-tune."""
-
- hyperparam_search_config: Required[Dict[str, object]]
- """The hyperparam search configuration."""
-
- job_uuid: Required[str]
- """The UUID of the job to create."""
-
- logger_config: Required[Dict[str, object]]
- """The logger configuration."""
-
- training_config: Required[TrainingConfig]
- """The training configuration."""
-
-
-class AlgorithmConfig(TypedDict, total=False):
- """The algorithm configuration."""
-
- beta: Required[float]
-
- loss_type: Literal["sigmoid", "hinge", "ipo", "kto_pair"]
-
-
-class TrainingConfigDataConfig(TypedDict, total=False):
- """Configuration for training data and data loading."""
-
- batch_size: Required[int]
-
- data_format: Required[Literal["instruct", "dialog"]]
- """Format of the training dataset."""
-
- dataset_id: Required[str]
-
- shuffle: Required[bool]
-
- packed: Optional[bool]
-
- train_on_input: Optional[bool]
-
- validation_dataset_id: Optional[str]
-
-
-class TrainingConfigEfficiencyConfig(TypedDict, total=False):
- """Configuration for memory and compute efficiency optimizations."""
-
- enable_activation_checkpointing: Optional[bool]
-
- enable_activation_offloading: Optional[bool]
-
- fsdp_cpu_offload: Optional[bool]
-
- memory_efficient_fsdp_wrap: Optional[bool]
-
-
-class TrainingConfigOptimizerConfig(TypedDict, total=False):
- """Configuration parameters for the optimization algorithm."""
-
- lr: Required[float]
-
- num_warmup_steps: Required[int]
-
- optimizer_type: Required[Literal["adam", "adamw", "sgd"]]
- """Available optimizer algorithms for training."""
-
- weight_decay: Required[float]
-
-
-class TrainingConfig(TypedDict, total=False):
- """The training configuration."""
-
- n_epochs: Required[int]
-
- data_config: Optional[TrainingConfigDataConfig]
- """Configuration for training data and data loading."""
-
- dtype: Optional[str]
-
- efficiency_config: Optional[TrainingConfigEfficiencyConfig]
- """Configuration for memory and compute efficiency optimizations."""
-
- gradient_accumulation_steps: int
-
- max_steps_per_epoch: int
-
- max_validation_steps: Optional[int]
-
- optimizer_config: Optional[TrainingConfigOptimizerConfig]
- """Configuration parameters for the optimization algorithm."""
diff --git a/src/llama_stack_client/types/alpha/post_training_supervised_fine_tune_params.py b/src/llama_stack_client/types/alpha/post_training_supervised_fine_tune_params.py
deleted file mode 100644
index d97ed3b5..00000000
--- a/src/llama_stack_client/types/alpha/post_training_supervised_fine_tune_params.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from ..._types import SequenceNotStr
-
-__all__ = [
- "PostTrainingSupervisedFineTuneParams",
- "TrainingConfig",
- "TrainingConfigDataConfig",
- "TrainingConfigEfficiencyConfig",
- "TrainingConfigOptimizerConfig",
- "AlgorithmConfig",
- "AlgorithmConfigLoraFinetuningConfig",
- "AlgorithmConfigQatFinetuningConfig",
-]
-
-
-class PostTrainingSupervisedFineTuneParams(TypedDict, total=False):
- hyperparam_search_config: Required[Dict[str, object]]
- """The hyperparam search configuration."""
-
- job_uuid: Required[str]
- """The UUID of the job to create."""
-
- logger_config: Required[Dict[str, object]]
- """The logger configuration."""
-
- training_config: Required[TrainingConfig]
- """The training configuration."""
-
- algorithm_config: Optional[AlgorithmConfig]
- """The algorithm configuration."""
-
- checkpoint_dir: Optional[str]
- """The directory to save checkpoint(s) to."""
-
- model: Optional[str]
- """Model descriptor for training if not in provider config"""
-
-
-class TrainingConfigDataConfig(TypedDict, total=False):
- """Configuration for training data and data loading."""
-
- batch_size: Required[int]
-
- data_format: Required[Literal["instruct", "dialog"]]
- """Format of the training dataset."""
-
- dataset_id: Required[str]
-
- shuffle: Required[bool]
-
- packed: Optional[bool]
-
- train_on_input: Optional[bool]
-
- validation_dataset_id: Optional[str]
-
-
-class TrainingConfigEfficiencyConfig(TypedDict, total=False):
- """Configuration for memory and compute efficiency optimizations."""
-
- enable_activation_checkpointing: Optional[bool]
-
- enable_activation_offloading: Optional[bool]
-
- fsdp_cpu_offload: Optional[bool]
-
- memory_efficient_fsdp_wrap: Optional[bool]
-
-
-class TrainingConfigOptimizerConfig(TypedDict, total=False):
- """Configuration parameters for the optimization algorithm."""
-
- lr: Required[float]
-
- num_warmup_steps: Required[int]
-
- optimizer_type: Required[Literal["adam", "adamw", "sgd"]]
- """Available optimizer algorithms for training."""
-
- weight_decay: Required[float]
-
-
-class TrainingConfig(TypedDict, total=False):
- """The training configuration."""
-
- n_epochs: Required[int]
-
- data_config: Optional[TrainingConfigDataConfig]
- """Configuration for training data and data loading."""
-
- dtype: Optional[str]
-
- efficiency_config: Optional[TrainingConfigEfficiencyConfig]
- """Configuration for memory and compute efficiency optimizations."""
-
- gradient_accumulation_steps: int
-
- max_steps_per_epoch: int
-
- max_validation_steps: Optional[int]
-
- optimizer_config: Optional[TrainingConfigOptimizerConfig]
- """Configuration parameters for the optimization algorithm."""
-
-
-class AlgorithmConfigLoraFinetuningConfig(TypedDict, total=False):
- """Configuration for Low-Rank Adaptation (LoRA) fine-tuning."""
-
- alpha: Required[int]
-
- apply_lora_to_mlp: Required[bool]
-
- apply_lora_to_output: Required[bool]
-
- lora_attn_modules: Required[SequenceNotStr[str]]
-
- rank: Required[int]
-
- quantize_base: Optional[bool]
-
- type: Literal["LoRA"]
-
- use_dora: Optional[bool]
-
-
-class AlgorithmConfigQatFinetuningConfig(TypedDict, total=False):
- """Configuration for Quantization-Aware Training (QAT) fine-tuning."""
-
- group_size: Required[int]
-
- quantizer_name: Required[str]
-
- type: Literal["QAT"]
-
-
-AlgorithmConfig: TypeAlias = Union[AlgorithmConfigLoraFinetuningConfig, AlgorithmConfigQatFinetuningConfig]
diff --git a/src/llama_stack_client/types/beta/dataset_list_response.py b/src/llama_stack_client/types/beta/dataset_list_response.py
index 56c8a177..e28253d8 100644
--- a/src/llama_stack_client/types/beta/dataset_list_response.py
+++ b/src/llama_stack_client/types/beta/dataset_list_response.py
@@ -64,7 +64,7 @@ class DatasetListResponseItem(BaseModel):
provider_id: str
"""ID of the provider that owns this resource"""
- purpose: Literal["post-training/messages", "eval/question-answer", "eval/messages-answer"]
+ purpose: Literal["eval/question-answer", "eval/messages-answer"]
"""Purpose of the dataset indicating its intended use"""
source: DatasetListResponseItemSource
diff --git a/src/llama_stack_client/types/beta/dataset_register_params.py b/src/llama_stack_client/types/beta/dataset_register_params.py
index 8891285d..b743b212 100644
--- a/src/llama_stack_client/types/beta/dataset_register_params.py
+++ b/src/llama_stack_client/types/beta/dataset_register_params.py
@@ -15,7 +15,7 @@
class DatasetRegisterParams(TypedDict, total=False):
- purpose: Required[Literal["post-training/messages", "eval/question-answer", "eval/messages-answer"]]
+ purpose: Required[Literal["eval/question-answer", "eval/messages-answer"]]
"""The purpose of the dataset."""
source: Required[Source]
diff --git a/src/llama_stack_client/types/beta/dataset_register_response.py b/src/llama_stack_client/types/beta/dataset_register_response.py
index d662f682..39596092 100644
--- a/src/llama_stack_client/types/beta/dataset_register_response.py
+++ b/src/llama_stack_client/types/beta/dataset_register_response.py
@@ -55,7 +55,7 @@ class DatasetRegisterResponse(BaseModel):
provider_id: str
"""ID of the provider that owns this resource"""
- purpose: Literal["post-training/messages", "eval/question-answer", "eval/messages-answer"]
+ purpose: Literal["eval/question-answer", "eval/messages-answer"]
"""Purpose of the dataset indicating its intended use"""
source: Source
diff --git a/src/llama_stack_client/types/beta/dataset_retrieve_response.py b/src/llama_stack_client/types/beta/dataset_retrieve_response.py
index 5349e2fb..053c27d8 100644
--- a/src/llama_stack_client/types/beta/dataset_retrieve_response.py
+++ b/src/llama_stack_client/types/beta/dataset_retrieve_response.py
@@ -55,7 +55,7 @@ class DatasetRetrieveResponse(BaseModel):
provider_id: str
"""ID of the provider that owns this resource"""
- purpose: Literal["post-training/messages", "eval/question-answer", "eval/messages-answer"]
+ purpose: Literal["eval/question-answer", "eval/messages-answer"]
"""Purpose of the dataset indicating its intended use"""
source: Source
diff --git a/tests/api_resources/alpha/post_training/__init__.py b/tests/api_resources/alpha/post_training/__init__.py
deleted file mode 100644
index 6a8e62e9..00000000
--- a/tests/api_resources/alpha/post_training/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/alpha/post_training/test_job.py b/tests/api_resources/alpha/post_training/test_job.py
deleted file mode 100644
index 8bf91c22..00000000
--- a/tests/api_resources/alpha/post_training/test_job.py
+++ /dev/null
@@ -1,308 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types.alpha.post_training import JobListResponse, JobStatusResponse, JobArtifactsResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestJob:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @parametrize
- def test_method_list(self, client: LlamaStackClient) -> None:
- job = client.alpha.post_training.job.list()
- assert_matches_type(JobListResponse, job, path=["response"])
-
- @parametrize
- def test_raw_response_list(self, client: LlamaStackClient) -> None:
- response = client.alpha.post_training.job.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = response.parse()
- assert_matches_type(JobListResponse, job, path=["response"])
-
- @parametrize
- def test_streaming_response_list(self, client: LlamaStackClient) -> None:
- with client.alpha.post_training.job.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = response.parse()
- assert_matches_type(JobListResponse, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_method_artifacts(self, client: LlamaStackClient) -> None:
- job = client.alpha.post_training.job.artifacts(
- "job_uuid",
- )
- assert_matches_type(JobArtifactsResponse, job, path=["response"])
-
- @parametrize
- def test_raw_response_artifacts(self, client: LlamaStackClient) -> None:
- response = client.alpha.post_training.job.with_raw_response.artifacts(
- "job_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = response.parse()
- assert_matches_type(JobArtifactsResponse, job, path=["response"])
-
- @parametrize
- def test_streaming_response_artifacts(self, client: LlamaStackClient) -> None:
- with client.alpha.post_training.job.with_streaming_response.artifacts(
- "job_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = response.parse()
- assert_matches_type(JobArtifactsResponse, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_artifacts(self, client: LlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_uuid` but received ''"):
- client.alpha.post_training.job.with_raw_response.artifacts(
- "",
- )
-
- @parametrize
- def test_method_cancel(self, client: LlamaStackClient) -> None:
- job = client.alpha.post_training.job.cancel(
- "job_uuid",
- )
- assert job is None
-
- @parametrize
- def test_raw_response_cancel(self, client: LlamaStackClient) -> None:
- response = client.alpha.post_training.job.with_raw_response.cancel(
- "job_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = response.parse()
- assert job is None
-
- @parametrize
- def test_streaming_response_cancel(self, client: LlamaStackClient) -> None:
- with client.alpha.post_training.job.with_streaming_response.cancel(
- "job_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = response.parse()
- assert job is None
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_cancel(self, client: LlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_uuid` but received ''"):
- client.alpha.post_training.job.with_raw_response.cancel(
- "",
- )
-
- @parametrize
- def test_method_status(self, client: LlamaStackClient) -> None:
- job = client.alpha.post_training.job.status(
- "job_uuid",
- )
- assert_matches_type(JobStatusResponse, job, path=["response"])
-
- @parametrize
- def test_raw_response_status(self, client: LlamaStackClient) -> None:
- response = client.alpha.post_training.job.with_raw_response.status(
- "job_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = response.parse()
- assert_matches_type(JobStatusResponse, job, path=["response"])
-
- @parametrize
- def test_streaming_response_status(self, client: LlamaStackClient) -> None:
- with client.alpha.post_training.job.with_streaming_response.status(
- "job_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = response.parse()
- assert_matches_type(JobStatusResponse, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_status(self, client: LlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_uuid` but received ''"):
- client.alpha.post_training.job.with_raw_response.status(
- "",
- )
-
-
-class TestAsyncJob:
- parametrize = pytest.mark.parametrize(
- "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
- )
-
- @parametrize
- async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
- job = await async_client.alpha.post_training.job.list()
- assert_matches_type(JobListResponse, job, path=["response"])
-
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.alpha.post_training.job.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = await response.parse()
- assert_matches_type(JobListResponse, job, path=["response"])
-
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.alpha.post_training.job.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = await response.parse()
- assert_matches_type(JobListResponse, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_method_artifacts(self, async_client: AsyncLlamaStackClient) -> None:
- job = await async_client.alpha.post_training.job.artifacts(
- "job_uuid",
- )
- assert_matches_type(JobArtifactsResponse, job, path=["response"])
-
- @parametrize
- async def test_raw_response_artifacts(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.alpha.post_training.job.with_raw_response.artifacts(
- "job_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = await response.parse()
- assert_matches_type(JobArtifactsResponse, job, path=["response"])
-
- @parametrize
- async def test_streaming_response_artifacts(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.alpha.post_training.job.with_streaming_response.artifacts(
- "job_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = await response.parse()
- assert_matches_type(JobArtifactsResponse, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_artifacts(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_uuid` but received ''"):
- await async_client.alpha.post_training.job.with_raw_response.artifacts(
- "",
- )
-
- @parametrize
- async def test_method_cancel(self, async_client: AsyncLlamaStackClient) -> None:
- job = await async_client.alpha.post_training.job.cancel(
- "job_uuid",
- )
- assert job is None
-
- @parametrize
- async def test_raw_response_cancel(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.alpha.post_training.job.with_raw_response.cancel(
- "job_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = await response.parse()
- assert job is None
-
- @parametrize
- async def test_streaming_response_cancel(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.alpha.post_training.job.with_streaming_response.cancel(
- "job_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = await response.parse()
- assert job is None
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_cancel(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_uuid` but received ''"):
- await async_client.alpha.post_training.job.with_raw_response.cancel(
- "",
- )
-
- @parametrize
- async def test_method_status(self, async_client: AsyncLlamaStackClient) -> None:
- job = await async_client.alpha.post_training.job.status(
- "job_uuid",
- )
- assert_matches_type(JobStatusResponse, job, path=["response"])
-
- @parametrize
- async def test_raw_response_status(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.alpha.post_training.job.with_raw_response.status(
- "job_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = await response.parse()
- assert_matches_type(JobStatusResponse, job, path=["response"])
-
- @parametrize
- async def test_streaming_response_status(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.alpha.post_training.job.with_streaming_response.status(
- "job_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = await response.parse()
- assert_matches_type(JobStatusResponse, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_status(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_uuid` but received ''"):
- await async_client.alpha.post_training.job.with_raw_response.status(
- "",
- )
diff --git a/tests/api_resources/alpha/test_post_training.py b/tests/api_resources/alpha/test_post_training.py
deleted file mode 100644
index 21fdd345..00000000
--- a/tests/api_resources/alpha/test_post_training.py
+++ /dev/null
@@ -1,386 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types.alpha import (
- PostTrainingJob,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestPostTraining:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @parametrize
- def test_method_preference_optimize(self, client: LlamaStackClient) -> None:
- post_training = client.alpha.post_training.preference_optimize(
- algorithm_config={"beta": 0},
- finetuned_model="finetuned_model",
- hyperparam_search_config={"foo": "bar"},
- job_uuid="job_uuid",
- logger_config={"foo": "bar"},
- training_config={"n_epochs": 0},
- )
- assert_matches_type(PostTrainingJob, post_training, path=["response"])
-
- @parametrize
- def test_method_preference_optimize_with_all_params(self, client: LlamaStackClient) -> None:
- post_training = client.alpha.post_training.preference_optimize(
- algorithm_config={
- "beta": 0,
- "loss_type": "sigmoid",
- },
- finetuned_model="finetuned_model",
- hyperparam_search_config={"foo": "bar"},
- job_uuid="job_uuid",
- logger_config={"foo": "bar"},
- training_config={
- "n_epochs": 0,
- "data_config": {
- "batch_size": 0,
- "data_format": "instruct",
- "dataset_id": "dataset_id",
- "shuffle": True,
- "packed": True,
- "train_on_input": True,
- "validation_dataset_id": "validation_dataset_id",
- },
- "dtype": "dtype",
- "efficiency_config": {
- "enable_activation_checkpointing": True,
- "enable_activation_offloading": True,
- "fsdp_cpu_offload": True,
- "memory_efficient_fsdp_wrap": True,
- },
- "gradient_accumulation_steps": 0,
- "max_steps_per_epoch": 0,
- "max_validation_steps": 0,
- "optimizer_config": {
- "lr": 0,
- "num_warmup_steps": 0,
- "optimizer_type": "adam",
- "weight_decay": 0,
- },
- },
- )
- assert_matches_type(PostTrainingJob, post_training, path=["response"])
-
- @parametrize
- def test_raw_response_preference_optimize(self, client: LlamaStackClient) -> None:
- response = client.alpha.post_training.with_raw_response.preference_optimize(
- algorithm_config={"beta": 0},
- finetuned_model="finetuned_model",
- hyperparam_search_config={"foo": "bar"},
- job_uuid="job_uuid",
- logger_config={"foo": "bar"},
- training_config={"n_epochs": 0},
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- post_training = response.parse()
- assert_matches_type(PostTrainingJob, post_training, path=["response"])
-
- @parametrize
- def test_streaming_response_preference_optimize(self, client: LlamaStackClient) -> None:
- with client.alpha.post_training.with_streaming_response.preference_optimize(
- algorithm_config={"beta": 0},
- finetuned_model="finetuned_model",
- hyperparam_search_config={"foo": "bar"},
- job_uuid="job_uuid",
- logger_config={"foo": "bar"},
- training_config={"n_epochs": 0},
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- post_training = response.parse()
- assert_matches_type(PostTrainingJob, post_training, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_method_supervised_fine_tune(self, client: LlamaStackClient) -> None:
- post_training = client.alpha.post_training.supervised_fine_tune(
- hyperparam_search_config={"foo": "bar"},
- job_uuid="job_uuid",
- logger_config={"foo": "bar"},
- training_config={"n_epochs": 0},
- )
- assert_matches_type(PostTrainingJob, post_training, path=["response"])
-
- @parametrize
- def test_method_supervised_fine_tune_with_all_params(self, client: LlamaStackClient) -> None:
- post_training = client.alpha.post_training.supervised_fine_tune(
- hyperparam_search_config={"foo": "bar"},
- job_uuid="job_uuid",
- logger_config={"foo": "bar"},
- training_config={
- "n_epochs": 0,
- "data_config": {
- "batch_size": 0,
- "data_format": "instruct",
- "dataset_id": "dataset_id",
- "shuffle": True,
- "packed": True,
- "train_on_input": True,
- "validation_dataset_id": "validation_dataset_id",
- },
- "dtype": "dtype",
- "efficiency_config": {
- "enable_activation_checkpointing": True,
- "enable_activation_offloading": True,
- "fsdp_cpu_offload": True,
- "memory_efficient_fsdp_wrap": True,
- },
- "gradient_accumulation_steps": 0,
- "max_steps_per_epoch": 0,
- "max_validation_steps": 0,
- "optimizer_config": {
- "lr": 0,
- "num_warmup_steps": 0,
- "optimizer_type": "adam",
- "weight_decay": 0,
- },
- },
- algorithm_config={
- "alpha": 0,
- "apply_lora_to_mlp": True,
- "apply_lora_to_output": True,
- "lora_attn_modules": ["string"],
- "rank": 0,
- "quantize_base": True,
- "type": "LoRA",
- "use_dora": True,
- },
- checkpoint_dir="checkpoint_dir",
- model="model",
- )
- assert_matches_type(PostTrainingJob, post_training, path=["response"])
-
- @parametrize
- def test_raw_response_supervised_fine_tune(self, client: LlamaStackClient) -> None:
- response = client.alpha.post_training.with_raw_response.supervised_fine_tune(
- hyperparam_search_config={"foo": "bar"},
- job_uuid="job_uuid",
- logger_config={"foo": "bar"},
- training_config={"n_epochs": 0},
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- post_training = response.parse()
- assert_matches_type(PostTrainingJob, post_training, path=["response"])
-
- @parametrize
- def test_streaming_response_supervised_fine_tune(self, client: LlamaStackClient) -> None:
- with client.alpha.post_training.with_streaming_response.supervised_fine_tune(
- hyperparam_search_config={"foo": "bar"},
- job_uuid="job_uuid",
- logger_config={"foo": "bar"},
- training_config={"n_epochs": 0},
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- post_training = response.parse()
- assert_matches_type(PostTrainingJob, post_training, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncPostTraining:
- parametrize = pytest.mark.parametrize(
- "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
- )
-
- @parametrize
- async def test_method_preference_optimize(self, async_client: AsyncLlamaStackClient) -> None:
- post_training = await async_client.alpha.post_training.preference_optimize(
- algorithm_config={"beta": 0},
- finetuned_model="finetuned_model",
- hyperparam_search_config={"foo": "bar"},
- job_uuid="job_uuid",
- logger_config={"foo": "bar"},
- training_config={"n_epochs": 0},
- )
- assert_matches_type(PostTrainingJob, post_training, path=["response"])
-
- @parametrize
- async def test_method_preference_optimize_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- post_training = await async_client.alpha.post_training.preference_optimize(
- algorithm_config={
- "beta": 0,
- "loss_type": "sigmoid",
- },
- finetuned_model="finetuned_model",
- hyperparam_search_config={"foo": "bar"},
- job_uuid="job_uuid",
- logger_config={"foo": "bar"},
- training_config={
- "n_epochs": 0,
- "data_config": {
- "batch_size": 0,
- "data_format": "instruct",
- "dataset_id": "dataset_id",
- "shuffle": True,
- "packed": True,
- "train_on_input": True,
- "validation_dataset_id": "validation_dataset_id",
- },
- "dtype": "dtype",
- "efficiency_config": {
- "enable_activation_checkpointing": True,
- "enable_activation_offloading": True,
- "fsdp_cpu_offload": True,
- "memory_efficient_fsdp_wrap": True,
- },
- "gradient_accumulation_steps": 0,
- "max_steps_per_epoch": 0,
- "max_validation_steps": 0,
- "optimizer_config": {
- "lr": 0,
- "num_warmup_steps": 0,
- "optimizer_type": "adam",
- "weight_decay": 0,
- },
- },
- )
- assert_matches_type(PostTrainingJob, post_training, path=["response"])
-
- @parametrize
- async def test_raw_response_preference_optimize(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.alpha.post_training.with_raw_response.preference_optimize(
- algorithm_config={"beta": 0},
- finetuned_model="finetuned_model",
- hyperparam_search_config={"foo": "bar"},
- job_uuid="job_uuid",
- logger_config={"foo": "bar"},
- training_config={"n_epochs": 0},
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- post_training = await response.parse()
- assert_matches_type(PostTrainingJob, post_training, path=["response"])
-
- @parametrize
- async def test_streaming_response_preference_optimize(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.alpha.post_training.with_streaming_response.preference_optimize(
- algorithm_config={"beta": 0},
- finetuned_model="finetuned_model",
- hyperparam_search_config={"foo": "bar"},
- job_uuid="job_uuid",
- logger_config={"foo": "bar"},
- training_config={"n_epochs": 0},
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- post_training = await response.parse()
- assert_matches_type(PostTrainingJob, post_training, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_method_supervised_fine_tune(self, async_client: AsyncLlamaStackClient) -> None:
- post_training = await async_client.alpha.post_training.supervised_fine_tune(
- hyperparam_search_config={"foo": "bar"},
- job_uuid="job_uuid",
- logger_config={"foo": "bar"},
- training_config={"n_epochs": 0},
- )
- assert_matches_type(PostTrainingJob, post_training, path=["response"])
-
- @parametrize
- async def test_method_supervised_fine_tune_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- post_training = await async_client.alpha.post_training.supervised_fine_tune(
- hyperparam_search_config={"foo": "bar"},
- job_uuid="job_uuid",
- logger_config={"foo": "bar"},
- training_config={
- "n_epochs": 0,
- "data_config": {
- "batch_size": 0,
- "data_format": "instruct",
- "dataset_id": "dataset_id",
- "shuffle": True,
- "packed": True,
- "train_on_input": True,
- "validation_dataset_id": "validation_dataset_id",
- },
- "dtype": "dtype",
- "efficiency_config": {
- "enable_activation_checkpointing": True,
- "enable_activation_offloading": True,
- "fsdp_cpu_offload": True,
- "memory_efficient_fsdp_wrap": True,
- },
- "gradient_accumulation_steps": 0,
- "max_steps_per_epoch": 0,
- "max_validation_steps": 0,
- "optimizer_config": {
- "lr": 0,
- "num_warmup_steps": 0,
- "optimizer_type": "adam",
- "weight_decay": 0,
- },
- },
- algorithm_config={
- "alpha": 0,
- "apply_lora_to_mlp": True,
- "apply_lora_to_output": True,
- "lora_attn_modules": ["string"],
- "rank": 0,
- "quantize_base": True,
- "type": "LoRA",
- "use_dora": True,
- },
- checkpoint_dir="checkpoint_dir",
- model="model",
- )
- assert_matches_type(PostTrainingJob, post_training, path=["response"])
-
- @parametrize
- async def test_raw_response_supervised_fine_tune(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.alpha.post_training.with_raw_response.supervised_fine_tune(
- hyperparam_search_config={"foo": "bar"},
- job_uuid="job_uuid",
- logger_config={"foo": "bar"},
- training_config={"n_epochs": 0},
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- post_training = await response.parse()
- assert_matches_type(PostTrainingJob, post_training, path=["response"])
-
- @parametrize
- async def test_streaming_response_supervised_fine_tune(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.alpha.post_training.with_streaming_response.supervised_fine_tune(
- hyperparam_search_config={"foo": "bar"},
- job_uuid="job_uuid",
- logger_config={"foo": "bar"},
- training_config={"n_epochs": 0},
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- post_training = await response.parse()
- assert_matches_type(PostTrainingJob, post_training, path=["response"])
-
- assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/beta/test_datasets.py b/tests/api_resources/beta/test_datasets.py
index 3a018fc0..a050fb78 100644
--- a/tests/api_resources/beta/test_datasets.py
+++ b/tests/api_resources/beta/test_datasets.py
@@ -190,7 +190,7 @@ def test_path_params_iterrows(self, client: LlamaStackClient) -> None:
def test_method_register(self, client: LlamaStackClient) -> None:
with pytest.warns(DeprecationWarning):
dataset = client.beta.datasets.register(
- purpose="post-training/messages",
+ purpose="eval/question-answer",
source={
"uri": "uri",
"type": "uri",
@@ -203,7 +203,7 @@ def test_method_register(self, client: LlamaStackClient) -> None:
def test_method_register_with_all_params(self, client: LlamaStackClient) -> None:
with pytest.warns(DeprecationWarning):
dataset = client.beta.datasets.register(
- purpose="post-training/messages",
+ purpose="eval/question-answer",
source={
"uri": "uri",
"type": "uri",
@@ -218,7 +218,7 @@ def test_method_register_with_all_params(self, client: LlamaStackClient) -> None
def test_raw_response_register(self, client: LlamaStackClient) -> None:
with pytest.warns(DeprecationWarning):
response = client.beta.datasets.with_raw_response.register(
- purpose="post-training/messages",
+ purpose="eval/question-answer",
source={
"uri": "uri",
"type": "uri",
@@ -234,7 +234,7 @@ def test_raw_response_register(self, client: LlamaStackClient) -> None:
def test_streaming_response_register(self, client: LlamaStackClient) -> None:
with pytest.warns(DeprecationWarning):
with client.beta.datasets.with_streaming_response.register(
- purpose="post-training/messages",
+ purpose="eval/question-answer",
source={
"uri": "uri",
"type": "uri",
@@ -457,7 +457,7 @@ async def test_path_params_iterrows(self, async_client: AsyncLlamaStackClient) -
async def test_method_register(self, async_client: AsyncLlamaStackClient) -> None:
with pytest.warns(DeprecationWarning):
dataset = await async_client.beta.datasets.register(
- purpose="post-training/messages",
+ purpose="eval/question-answer",
source={
"uri": "uri",
"type": "uri",
@@ -470,7 +470,7 @@ async def test_method_register(self, async_client: AsyncLlamaStackClient) -> Non
async def test_method_register_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
with pytest.warns(DeprecationWarning):
dataset = await async_client.beta.datasets.register(
- purpose="post-training/messages",
+ purpose="eval/question-answer",
source={
"uri": "uri",
"type": "uri",
@@ -485,7 +485,7 @@ async def test_method_register_with_all_params(self, async_client: AsyncLlamaSta
async def test_raw_response_register(self, async_client: AsyncLlamaStackClient) -> None:
with pytest.warns(DeprecationWarning):
response = await async_client.beta.datasets.with_raw_response.register(
- purpose="post-training/messages",
+ purpose="eval/question-answer",
source={
"uri": "uri",
"type": "uri",
@@ -501,7 +501,7 @@ async def test_raw_response_register(self, async_client: AsyncLlamaStackClient)
async def test_streaming_response_register(self, async_client: AsyncLlamaStackClient) -> None:
with pytest.warns(DeprecationWarning):
async with async_client.beta.datasets.with_streaming_response.register(
- purpose="post-training/messages",
+ purpose="eval/question-answer",
source={
"uri": "uri",
"type": "uri",
From d6a79d0a830bad4e82b70d7ab9e007ebc16e0f05 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 11 Mar 2026 16:54:19 +0000
Subject: [PATCH 02/26] fix: remove duplicate dataset_id parameter in
append-rows endpoint
---
.stats.yml | 4 +-
api.md | 2 +-
.../resources/beta/datasets.py | 42 ++++++-------------
.../types/beta/dataset_appendrows_params.py | 7 +---
tests/api_resources/beta/test_datasets.py | 28 +++++--------
5 files changed, 26 insertions(+), 57 deletions(-)
diff --git a/.stats.yml b/.stats.yml
index 5503e9a9..1fce0cef 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 102
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-60d07d12e08e84edf3d1d8665182f45548d840821600ee994b7e4f32c40c1d49.yml
-openapi_spec_hash: 375be93b0b1348b7dac1a4749f806870
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-36a0290facb461859b027e5f50206ce9c05fc5d0e8366d1e210ade0bda556936.yml
+openapi_spec_hash: 7ca2a28f0cc0958f01709c749cc3b7a4
config_hash: d1db834ac019b3bb2e260c9e37dcb5d1
diff --git a/api.md b/api.md
index 713b81ec..5e6ba72b 100644
--- a/api.md
+++ b/api.md
@@ -522,7 +522,7 @@ Methods:
- client.beta.datasets.retrieve(dataset_id) -> DatasetRetrieveResponse
- client.beta.datasets.list() -> DatasetListResponse
-- client.beta.datasets.appendrows(path_dataset_id, \*\*params) -> None
+- client.beta.datasets.appendrows(dataset_id, \*\*params) -> None
- client.beta.datasets.iterrows(dataset_id, \*\*params) -> DatasetIterrowsResponse
- client.beta.datasets.register(\*\*params) -> DatasetRegisterResponse
- client.beta.datasets.unregister(dataset_id) -> None
diff --git a/src/llama_stack_client/resources/beta/datasets.py b/src/llama_stack_client/resources/beta/datasets.py
index 0dd1071c..ed6dd256 100644
--- a/src/llama_stack_client/resources/beta/datasets.py
+++ b/src/llama_stack_client/resources/beta/datasets.py
@@ -115,9 +115,8 @@ def list(
def appendrows(
self,
- path_dataset_id: str,
+ dataset_id: str,
*,
- body_dataset_id: str,
rows: Iterable[Dict[str, object]],
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -130,9 +129,7 @@ def appendrows(
Append rows to a dataset.
Args:
- path_dataset_id: The ID of the dataset to append the rows to.
-
- body_dataset_id: The ID of the dataset to append the rows to.
+ dataset_id: The ID of the dataset to append the rows to.
rows: The rows to append to the dataset.
@@ -144,18 +141,12 @@ def appendrows(
timeout: Override the client-level default timeout for this request, in seconds
"""
- if not path_dataset_id:
- raise ValueError(f"Expected a non-empty value for `path_dataset_id` but received {path_dataset_id!r}")
+ if not dataset_id:
+ raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._post(
- f"/v1beta/datasetio/append-rows/{path_dataset_id}",
- body=maybe_transform(
- {
- "body_dataset_id": body_dataset_id,
- "rows": rows,
- },
- dataset_appendrows_params.DatasetAppendrowsParams,
- ),
+ f"/v1beta/datasetio/append-rows/{dataset_id}",
+ body=maybe_transform({"rows": rows}, dataset_appendrows_params.DatasetAppendrowsParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -393,9 +384,8 @@ async def list(
async def appendrows(
self,
- path_dataset_id: str,
+ dataset_id: str,
*,
- body_dataset_id: str,
rows: Iterable[Dict[str, object]],
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -408,9 +398,7 @@ async def appendrows(
Append rows to a dataset.
Args:
- path_dataset_id: The ID of the dataset to append the rows to.
-
- body_dataset_id: The ID of the dataset to append the rows to.
+ dataset_id: The ID of the dataset to append the rows to.
rows: The rows to append to the dataset.
@@ -422,18 +410,12 @@ async def appendrows(
timeout: Override the client-level default timeout for this request, in seconds
"""
- if not path_dataset_id:
- raise ValueError(f"Expected a non-empty value for `path_dataset_id` but received {path_dataset_id!r}")
+ if not dataset_id:
+ raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._post(
- f"/v1beta/datasetio/append-rows/{path_dataset_id}",
- body=await async_maybe_transform(
- {
- "body_dataset_id": body_dataset_id,
- "rows": rows,
- },
- dataset_appendrows_params.DatasetAppendrowsParams,
- ),
+ f"/v1beta/datasetio/append-rows/{dataset_id}",
+ body=await async_maybe_transform({"rows": rows}, dataset_appendrows_params.DatasetAppendrowsParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/llama_stack_client/types/beta/dataset_appendrows_params.py b/src/llama_stack_client/types/beta/dataset_appendrows_params.py
index cf615cd3..bc7f4aca 100644
--- a/src/llama_stack_client/types/beta/dataset_appendrows_params.py
+++ b/src/llama_stack_client/types/beta/dataset_appendrows_params.py
@@ -9,16 +9,11 @@
from __future__ import annotations
from typing import Dict, Iterable
-from typing_extensions import Required, Annotated, TypedDict
-
-from ..._utils import PropertyInfo
+from typing_extensions import Required, TypedDict
__all__ = ["DatasetAppendrowsParams"]
class DatasetAppendrowsParams(TypedDict, total=False):
- body_dataset_id: Required[Annotated[str, PropertyInfo(alias="dataset_id")]]
- """The ID of the dataset to append the rows to."""
-
rows: Required[Iterable[Dict[str, object]]]
"""The rows to append to the dataset."""
diff --git a/tests/api_resources/beta/test_datasets.py b/tests/api_resources/beta/test_datasets.py
index a050fb78..8ddd7646 100644
--- a/tests/api_resources/beta/test_datasets.py
+++ b/tests/api_resources/beta/test_datasets.py
@@ -96,8 +96,7 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None:
@parametrize
def test_method_appendrows(self, client: LlamaStackClient) -> None:
dataset = client.beta.datasets.appendrows(
- path_dataset_id="dataset_id",
- body_dataset_id="dataset_id",
+ dataset_id="dataset_id",
rows=[{"foo": "bar"}],
)
assert dataset is None
@@ -105,8 +104,7 @@ def test_method_appendrows(self, client: LlamaStackClient) -> None:
@parametrize
def test_raw_response_appendrows(self, client: LlamaStackClient) -> None:
response = client.beta.datasets.with_raw_response.appendrows(
- path_dataset_id="dataset_id",
- body_dataset_id="dataset_id",
+ dataset_id="dataset_id",
rows=[{"foo": "bar"}],
)
@@ -118,8 +116,7 @@ def test_raw_response_appendrows(self, client: LlamaStackClient) -> None:
@parametrize
def test_streaming_response_appendrows(self, client: LlamaStackClient) -> None:
with client.beta.datasets.with_streaming_response.appendrows(
- path_dataset_id="dataset_id",
- body_dataset_id="dataset_id",
+ dataset_id="dataset_id",
rows=[{"foo": "bar"}],
) as response:
assert not response.is_closed
@@ -132,10 +129,9 @@ def test_streaming_response_appendrows(self, client: LlamaStackClient) -> None:
@parametrize
def test_path_params_appendrows(self, client: LlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_dataset_id` but received ''"):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
client.beta.datasets.with_raw_response.appendrows(
- path_dataset_id="",
- body_dataset_id="dataset_id",
+ dataset_id="",
rows=[{"foo": "bar"}],
)
@@ -363,8 +359,7 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient
@parametrize
async def test_method_appendrows(self, async_client: AsyncLlamaStackClient) -> None:
dataset = await async_client.beta.datasets.appendrows(
- path_dataset_id="dataset_id",
- body_dataset_id="dataset_id",
+ dataset_id="dataset_id",
rows=[{"foo": "bar"}],
)
assert dataset is None
@@ -372,8 +367,7 @@ async def test_method_appendrows(self, async_client: AsyncLlamaStackClient) -> N
@parametrize
async def test_raw_response_appendrows(self, async_client: AsyncLlamaStackClient) -> None:
response = await async_client.beta.datasets.with_raw_response.appendrows(
- path_dataset_id="dataset_id",
- body_dataset_id="dataset_id",
+ dataset_id="dataset_id",
rows=[{"foo": "bar"}],
)
@@ -385,8 +379,7 @@ async def test_raw_response_appendrows(self, async_client: AsyncLlamaStackClient
@parametrize
async def test_streaming_response_appendrows(self, async_client: AsyncLlamaStackClient) -> None:
async with async_client.beta.datasets.with_streaming_response.appendrows(
- path_dataset_id="dataset_id",
- body_dataset_id="dataset_id",
+ dataset_id="dataset_id",
rows=[{"foo": "bar"}],
) as response:
assert not response.is_closed
@@ -399,10 +392,9 @@ async def test_streaming_response_appendrows(self, async_client: AsyncLlamaStack
@parametrize
async def test_path_params_appendrows(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_dataset_id` but received ''"):
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
await async_client.beta.datasets.with_raw_response.appendrows(
- path_dataset_id="",
- body_dataset_id="dataset_id",
+ dataset_id="",
rows=[{"foo": "bar"}],
)
From 147b88b44eb83bceb7cd6204cd79d8dafe8f8e7a Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 13 Mar 2026 12:15:11 +0000
Subject: [PATCH 03/26] fix(inference): improve chat completions OpenAI
conformance
---
.stats.yml | 4 ++--
.../types/chat/completion_create_params.py | 4 ++--
.../types/chat/completion_create_response.py | 20 +++++++---------
.../types/chat/completion_list_response.py | 24 +++++++++----------
.../chat/completion_retrieve_response.py | 24 +++++++++----------
.../types/chat_completion_chunk.py | 18 +++++++-------
.../types/safety_run_shield_params.py | 4 ++--
7 files changed, 45 insertions(+), 53 deletions(-)
diff --git a/.stats.yml b/.stats.yml
index 1fce0cef..55d838b1 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 102
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-36a0290facb461859b027e5f50206ce9c05fc5d0e8366d1e210ade0bda556936.yml
-openapi_spec_hash: 7ca2a28f0cc0958f01709c749cc3b7a4
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-1aedf17a2c2aba13036e748a3e9d7811fb9e423e9dc86f9913a9cd17590e4f0b.yml
+openapi_spec_hash: 878315e3cf9cbe546888a24b8b819660
config_hash: d1db834ac019b3bb2e260c9e37dcb5d1
diff --git a/src/llama_stack_client/types/chat/completion_create_params.py b/src/llama_stack_client/types/chat/completion_create_params.py
index 75c21b29..af533b6a 100644
--- a/src/llama_stack_client/types/chat/completion_create_params.py
+++ b/src/llama_stack_client/types/chat/completion_create_params.py
@@ -258,10 +258,10 @@ class MessageOpenAIAssistantMessageParamInputContentListOpenAIChatCompletionCont
class MessageOpenAIAssistantMessageParamInputToolCallFunction(TypedDict, total=False):
"""Function call details for OpenAI-compatible tool calls."""
- arguments: Optional[str]
+ arguments: Required[str]
"""Arguments to pass to the function as a JSON string."""
- name: Optional[str]
+ name: Required[str]
"""Name of the function to call."""
diff --git a/src/llama_stack_client/types/chat/completion_create_response.py b/src/llama_stack_client/types/chat/completion_create_response.py
index 65d3c25b..a76b557f 100644
--- a/src/llama_stack_client/types/chat/completion_create_response.py
+++ b/src/llama_stack_client/types/chat/completion_create_response.py
@@ -32,20 +32,20 @@
class ChoiceMessageFunctionCall(BaseModel):
"""Deprecated: the name and arguments of a function that should be called."""
- arguments: Optional[str] = None
+ arguments: str
"""Arguments to pass to the function as a JSON string."""
- name: Optional[str] = None
+ name: str
"""Name of the function to call."""
class ChoiceMessageToolCallFunction(BaseModel):
"""Function call details for OpenAI-compatible tool calls."""
- arguments: Optional[str] = None
+ arguments: str
"""Arguments to pass to the function as a JSON string."""
- name: Optional[str] = None
+ name: str
"""Name of the function to call."""
@@ -157,9 +157,7 @@ class ChoiceLogprobsRefusal(BaseModel):
class ChoiceLogprobs(BaseModel):
- """
- The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response.
- """
+ """The log probabilities for the tokens in the message."""
content: Optional[List[ChoiceLogprobsContent]] = None
"""The log probabilities for the tokens in the message."""
@@ -181,10 +179,7 @@ class Choice(BaseModel):
"""The message from the model."""
logprobs: Optional[ChoiceLogprobs] = None
- """
- The log probabilities for the tokens in the message from an OpenAI-compatible
- chat completion response.
- """
+ """The log probabilities for the tokens in the message."""
class UsageCompletionTokensDetails(BaseModel):
@@ -241,5 +236,8 @@ class CompletionCreateResponse(BaseModel):
service_tier: Optional[str] = None
"""The service tier that was used for this response."""
+ system_fingerprint: Optional[str] = None
+ """System fingerprint for this completion."""
+
usage: Optional[Usage] = None
"""Token usage information for the completion."""
diff --git a/src/llama_stack_client/types/chat/completion_list_response.py b/src/llama_stack_client/types/chat/completion_list_response.py
index 8ab9037f..1f27d036 100644
--- a/src/llama_stack_client/types/chat/completion_list_response.py
+++ b/src/llama_stack_client/types/chat/completion_list_response.py
@@ -54,20 +54,20 @@
class DataChoiceMessageFunctionCall(BaseModel):
"""Deprecated: the name and arguments of a function that should be called."""
- arguments: Optional[str] = None
+ arguments: str
"""Arguments to pass to the function as a JSON string."""
- name: Optional[str] = None
+ name: str
"""Name of the function to call."""
class DataChoiceMessageToolCallFunction(BaseModel):
"""Function call details for OpenAI-compatible tool calls."""
- arguments: Optional[str] = None
+ arguments: str
"""Arguments to pass to the function as a JSON string."""
- name: Optional[str] = None
+ name: str
"""Name of the function to call."""
@@ -179,9 +179,7 @@ class DataChoiceLogprobsRefusal(BaseModel):
class DataChoiceLogprobs(BaseModel):
- """
- The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response.
- """
+ """The log probabilities for the tokens in the message."""
content: Optional[List[DataChoiceLogprobsContent]] = None
"""The log probabilities for the tokens in the message."""
@@ -203,10 +201,7 @@ class DataChoice(BaseModel):
"""The message from the model."""
logprobs: Optional[DataChoiceLogprobs] = None
- """
- The log probabilities for the tokens in the message from an OpenAI-compatible
- chat completion response.
- """
+ """The log probabilities for the tokens in the message."""
class DataInputMessageOpenAIUserMessageParamOutputContentListOpenAIChatCompletionContentPartTextParamOpenAIChatCompletionContentPartImageParamOpenAIFileOpenAIChatCompletionContentPartTextParam(
@@ -339,10 +334,10 @@ class DataInputMessageOpenAIAssistantMessageParamOutputContentListOpenAIChatComp
class DataInputMessageOpenAIAssistantMessageParamOutputToolCallFunction(BaseModel):
"""Function call details for OpenAI-compatible tool calls."""
- arguments: Optional[str] = None
+ arguments: str
"""Arguments to pass to the function as a JSON string."""
- name: Optional[str] = None
+ name: str
"""Name of the function to call."""
@@ -513,6 +508,9 @@ class Data(BaseModel):
service_tier: Optional[str] = None
"""The service tier that was used for this response."""
+ system_fingerprint: Optional[str] = None
+ """System fingerprint for this completion."""
+
usage: Optional[DataUsage] = None
"""Token usage information for the completion."""
diff --git a/src/llama_stack_client/types/chat/completion_retrieve_response.py b/src/llama_stack_client/types/chat/completion_retrieve_response.py
index a70767f9..b3d783c9 100644
--- a/src/llama_stack_client/types/chat/completion_retrieve_response.py
+++ b/src/llama_stack_client/types/chat/completion_retrieve_response.py
@@ -53,20 +53,20 @@
class ChoiceMessageFunctionCall(BaseModel):
"""Deprecated: the name and arguments of a function that should be called."""
- arguments: Optional[str] = None
+ arguments: str
"""Arguments to pass to the function as a JSON string."""
- name: Optional[str] = None
+ name: str
"""Name of the function to call."""
class ChoiceMessageToolCallFunction(BaseModel):
"""Function call details for OpenAI-compatible tool calls."""
- arguments: Optional[str] = None
+ arguments: str
"""Arguments to pass to the function as a JSON string."""
- name: Optional[str] = None
+ name: str
"""Name of the function to call."""
@@ -178,9 +178,7 @@ class ChoiceLogprobsRefusal(BaseModel):
class ChoiceLogprobs(BaseModel):
- """
- The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response.
- """
+ """The log probabilities for the tokens in the message."""
content: Optional[List[ChoiceLogprobsContent]] = None
"""The log probabilities for the tokens in the message."""
@@ -202,10 +200,7 @@ class Choice(BaseModel):
"""The message from the model."""
logprobs: Optional[ChoiceLogprobs] = None
- """
- The log probabilities for the tokens in the message from an OpenAI-compatible
- chat completion response.
- """
+ """The log probabilities for the tokens in the message."""
class InputMessageOpenAIUserMessageParamOutputContentListOpenAIChatCompletionContentPartTextParamOpenAIChatCompletionContentPartImageParamOpenAIFileOpenAIChatCompletionContentPartTextParam(
@@ -336,10 +331,10 @@ class InputMessageOpenAIAssistantMessageParamOutputContentListOpenAIChatCompleti
class InputMessageOpenAIAssistantMessageParamOutputToolCallFunction(BaseModel):
"""Function call details for OpenAI-compatible tool calls."""
- arguments: Optional[str] = None
+ arguments: str
"""Arguments to pass to the function as a JSON string."""
- name: Optional[str] = None
+ name: str
"""Name of the function to call."""
@@ -510,5 +505,8 @@ class CompletionRetrieveResponse(BaseModel):
service_tier: Optional[str] = None
"""The service tier that was used for this response."""
+ system_fingerprint: Optional[str] = None
+ """System fingerprint for this completion."""
+
usage: Optional[Usage] = None
"""Token usage information for the completion."""
diff --git a/src/llama_stack_client/types/chat_completion_chunk.py b/src/llama_stack_client/types/chat_completion_chunk.py
index 95eed9f8..69b6a4ef 100644
--- a/src/llama_stack_client/types/chat_completion_chunk.py
+++ b/src/llama_stack_client/types/chat_completion_chunk.py
@@ -31,10 +31,10 @@
class ChoiceDeltaToolCallFunction(BaseModel):
"""Function call details for OpenAI-compatible tool calls."""
- arguments: Optional[str] = None
+ arguments: str
"""Arguments to pass to the function as a JSON string."""
- name: Optional[str] = None
+ name: str
"""Name of the function to call."""
@@ -66,7 +66,7 @@ class ChoiceDelta(BaseModel):
refusal: Optional[str] = None
"""The refusal of the delta."""
- role: Optional[str] = None
+ role: Optional[Literal["developer", "system", "user", "assistant", "tool"]] = None
"""The role of the delta."""
tool_calls: Optional[List[ChoiceDeltaToolCall]] = None
@@ -140,9 +140,7 @@ class ChoiceLogprobsRefusal(BaseModel):
class ChoiceLogprobs(BaseModel):
- """
- The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response.
- """
+ """The log probabilities for the tokens in the message."""
content: Optional[List[ChoiceLogprobsContent]] = None
"""The log probabilities for the tokens in the message."""
@@ -164,10 +162,7 @@ class Choice(BaseModel):
"""The reason the model stopped generating."""
logprobs: Optional[ChoiceLogprobs] = None
- """
- The log probabilities for the tokens in the message from an OpenAI-compatible
- chat completion response.
- """
+ """The log probabilities for the tokens in the message."""
class UsageCompletionTokensDetails(BaseModel):
@@ -228,6 +223,9 @@ class ChatCompletionChunk(BaseModel):
service_tier: Optional[str] = None
"""The service tier that was used for this response."""
+ system_fingerprint: Optional[str] = None
+ """System fingerprint for this completion chunk."""
+
usage: Optional[Usage] = None
"""
Token usage information (typically included in final chunk with stream_options).
diff --git a/src/llama_stack_client/types/safety_run_shield_params.py b/src/llama_stack_client/types/safety_run_shield_params.py
index 6db5ced7..150579c1 100644
--- a/src/llama_stack_client/types/safety_run_shield_params.py
+++ b/src/llama_stack_client/types/safety_run_shield_params.py
@@ -177,10 +177,10 @@ class MessageOpenAIAssistantMessageParamInputContentListOpenAIChatCompletionCont
class MessageOpenAIAssistantMessageParamInputToolCallFunction(TypedDict, total=False):
"""Function call details for OpenAI-compatible tool calls."""
- arguments: Optional[str]
+ arguments: Required[str]
"""Arguments to pass to the function as a JSON string."""
- name: Optional[str]
+ name: Required[str]
"""Name of the function to call."""
From f46809696ddf1f179cc26984facfcbb7f9264730 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 13 Mar 2026 16:54:55 +0000
Subject: [PATCH 04/26] chore(internal): version bump
---
uv.lock | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/uv.lock b/uv.lock
index 514ce71d..64e72181 100644
--- a/uv.lock
+++ b/uv.lock
@@ -517,7 +517,7 @@ wheels = [
[[package]]
name = "llama-stack-client"
-version = "0.5.0a2"
+version = "0.6.1a1"
source = { editable = "." }
dependencies = [
{ name = "anyio" },
From b4c2f15b16872730a9c254b1b2dfc02aba223a71 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Mon, 16 Mar 2026 13:46:36 +0000
Subject: [PATCH 05/26] feat: Add stream_options parameter support
---
.stats.yml | 4 ++--
.../resources/responses/responses.py | 22 +++++++++++++++++++
.../types/response_create_params.py | 11 ++++++++++
tests/api_resources/test_responses.py | 4 ++++
4 files changed, 39 insertions(+), 2 deletions(-)
diff --git a/.stats.yml b/.stats.yml
index 55d838b1..7d0d6d1d 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 102
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-1aedf17a2c2aba13036e748a3e9d7811fb9e423e9dc86f9913a9cd17590e4f0b.yml
-openapi_spec_hash: 878315e3cf9cbe546888a24b8b819660
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-c9705d79751306cd0465c0221af7fe922c53733be0d98f2aaa2378c0a6ef9b88.yml
+openapi_spec_hash: bab7c39fa1e9cb5b7cc03436957161c7
config_hash: d1db834ac019b3bb2e260c9e37dcb5d1
diff --git a/src/llama_stack_client/resources/responses/responses.py b/src/llama_stack_client/resources/responses/responses.py
index 6ea1ffe3..6ad82e30 100644
--- a/src/llama_stack_client/resources/responses/responses.py
+++ b/src/llama_stack_client/resources/responses/responses.py
@@ -114,6 +114,7 @@ def create(
service_tier: Optional[Literal["auto", "default", "flex", "priority"]] | Omit = omit,
store: Optional[bool] | Omit = omit,
stream: Optional[Literal[False]] | Omit = omit,
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
temperature: Optional[float] | Omit = omit,
text: Optional[response_create_params.Text] | Omit = omit,
tool_choice: Optional[response_create_params.ToolChoice] | Omit = omit,
@@ -179,6 +180,8 @@ def create(
stream: Whether to stream the response.
+ stream_options: Options that control streamed response behavior.
+
temperature: Sampling temperature.
text: Text response configuration for OpenAI responses.
@@ -250,6 +253,7 @@ def create(
safety_identifier: Optional[str] | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "priority"]] | Omit = omit,
store: Optional[bool] | Omit = omit,
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
temperature: Optional[float] | Omit = omit,
text: Optional[response_create_params.Text] | Omit = omit,
tool_choice: Optional[response_create_params.ToolChoice] | Omit = omit,
@@ -315,6 +319,8 @@ def create(
store: Whether to store the response in the database.
+ stream_options: Options that control streamed response behavior.
+
temperature: Sampling temperature.
text: Text response configuration for OpenAI responses.
@@ -386,6 +392,7 @@ def create(
safety_identifier: Optional[str] | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "priority"]] | Omit = omit,
store: Optional[bool] | Omit = omit,
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
temperature: Optional[float] | Omit = omit,
text: Optional[response_create_params.Text] | Omit = omit,
tool_choice: Optional[response_create_params.ToolChoice] | Omit = omit,
@@ -451,6 +458,8 @@ def create(
store: Whether to store the response in the database.
+ stream_options: Options that control streamed response behavior.
+
temperature: Sampling temperature.
text: Text response configuration for OpenAI responses.
@@ -522,6 +531,7 @@ def create(
service_tier: Optional[Literal["auto", "default", "flex", "priority"]] | Omit = omit,
store: Optional[bool] | Omit = omit,
stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
temperature: Optional[float] | Omit = omit,
text: Optional[response_create_params.Text] | Omit = omit,
tool_choice: Optional[response_create_params.ToolChoice] | Omit = omit,
@@ -562,6 +572,7 @@ def create(
"service_tier": service_tier,
"store": store,
"stream": stream,
+ "stream_options": stream_options,
"temperature": temperature,
"text": text,
"tool_choice": tool_choice,
@@ -779,6 +790,7 @@ async def create(
service_tier: Optional[Literal["auto", "default", "flex", "priority"]] | Omit = omit,
store: Optional[bool] | Omit = omit,
stream: Optional[Literal[False]] | Omit = omit,
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
temperature: Optional[float] | Omit = omit,
text: Optional[response_create_params.Text] | Omit = omit,
tool_choice: Optional[response_create_params.ToolChoice] | Omit = omit,
@@ -844,6 +856,8 @@ async def create(
stream: Whether to stream the response.
+ stream_options: Options that control streamed response behavior.
+
temperature: Sampling temperature.
text: Text response configuration for OpenAI responses.
@@ -915,6 +929,7 @@ async def create(
safety_identifier: Optional[str] | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "priority"]] | Omit = omit,
store: Optional[bool] | Omit = omit,
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
temperature: Optional[float] | Omit = omit,
text: Optional[response_create_params.Text] | Omit = omit,
tool_choice: Optional[response_create_params.ToolChoice] | Omit = omit,
@@ -980,6 +995,8 @@ async def create(
store: Whether to store the response in the database.
+ stream_options: Options that control streamed response behavior.
+
temperature: Sampling temperature.
text: Text response configuration for OpenAI responses.
@@ -1051,6 +1068,7 @@ async def create(
safety_identifier: Optional[str] | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "priority"]] | Omit = omit,
store: Optional[bool] | Omit = omit,
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
temperature: Optional[float] | Omit = omit,
text: Optional[response_create_params.Text] | Omit = omit,
tool_choice: Optional[response_create_params.ToolChoice] | Omit = omit,
@@ -1116,6 +1134,8 @@ async def create(
store: Whether to store the response in the database.
+ stream_options: Options that control streamed response behavior.
+
temperature: Sampling temperature.
text: Text response configuration for OpenAI responses.
@@ -1187,6 +1207,7 @@ async def create(
service_tier: Optional[Literal["auto", "default", "flex", "priority"]] | Omit = omit,
store: Optional[bool] | Omit = omit,
stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
temperature: Optional[float] | Omit = omit,
text: Optional[response_create_params.Text] | Omit = omit,
tool_choice: Optional[response_create_params.ToolChoice] | Omit = omit,
@@ -1227,6 +1248,7 @@ async def create(
"service_tier": service_tier,
"store": store,
"stream": stream,
+ "stream_options": stream_options,
"temperature": temperature,
"text": text,
"tool_choice": tool_choice,
diff --git a/src/llama_stack_client/types/response_create_params.py b/src/llama_stack_client/types/response_create_params.py
index 06ddbdce..7c08f512 100644
--- a/src/llama_stack_client/types/response_create_params.py
+++ b/src/llama_stack_client/types/response_create_params.py
@@ -53,6 +53,7 @@
"PromptVariablesOpenAIResponseInputMessageContentImage",
"PromptVariablesOpenAIResponseInputMessageContentFile",
"Reasoning",
+ "StreamOptions",
"Text",
"TextFormat",
"ToolChoice",
@@ -168,6 +169,9 @@ class ResponseCreateParamsBase(TypedDict, total=False):
store: Optional[bool]
"""Whether to store the response in the database."""
+ stream_options: Optional[StreamOptions]
+ """Options that control streamed response behavior."""
+
temperature: Optional[float]
"""Sampling temperature."""
@@ -718,6 +722,13 @@ class Reasoning(TypedDict, total=False):
effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]]
+class StreamOptions(TypedDict, total=False):
+ """Options that control streamed response behavior."""
+
+ include_obfuscation: bool
+ """Whether to obfuscate sensitive information in streamed output."""
+
+
class TextFormat(TypedDict, total=False):
"""Configuration for Responses API text format."""
diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py
index f7d540b0..2b85ceb0 100644
--- a/tests/api_resources/test_responses.py
+++ b/tests/api_resources/test_responses.py
@@ -70,6 +70,7 @@ def test_method_create_with_all_params_overload_1(self, client: LlamaStackClient
service_tier="auto",
store=True,
stream=False,
+ stream_options={"include_obfuscation": True},
temperature=0,
text={
"format": {
@@ -162,6 +163,7 @@ def test_method_create_with_all_params_overload_2(self, client: LlamaStackClient
safety_identifier="safety_identifier",
service_tier="auto",
store=True,
+ stream_options={"include_obfuscation": True},
temperature=0,
text={
"format": {
@@ -371,6 +373,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
service_tier="auto",
store=True,
stream=False,
+ stream_options={"include_obfuscation": True},
temperature=0,
text={
"format": {
@@ -463,6 +466,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
safety_identifier="safety_identifier",
service_tier="auto",
store=True,
+ stream_options={"include_obfuscation": True},
temperature=0,
text={
"format": {
From accfb0f5b355fd76d4ea452c040d660067e2c666 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Mon, 16 Mar 2026 14:08:03 +0000
Subject: [PATCH 06/26] codegen metadata
---
.stats.yml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/.stats.yml b/.stats.yml
index 7d0d6d1d..0924840b 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 102
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-c9705d79751306cd0465c0221af7fe922c53733be0d98f2aaa2378c0a6ef9b88.yml
-openapi_spec_hash: bab7c39fa1e9cb5b7cc03436957161c7
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-514340e257a7c8ccfb8f64403fa1aa5824cd256a774c49c232f539ea7590262f.yml
+openapi_spec_hash: 903087bc571fae827d1c6883580864f8
config_hash: d1db834ac019b3bb2e260c9e37dcb5d1
From f6836f9dacef1b9b26774fcfaf82689ae00f374a Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Mon, 16 Mar 2026 17:33:13 +0000
Subject: [PATCH 07/26] fix(pydantic): do not pass `by_alias` unless set
---
src/llama_stack_client/_compat.py | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/src/llama_stack_client/_compat.py b/src/llama_stack_client/_compat.py
index a099b003..db494546 100644
--- a/src/llama_stack_client/_compat.py
+++ b/src/llama_stack_client/_compat.py
@@ -8,7 +8,7 @@
from typing import TYPE_CHECKING, Any, Union, Generic, TypeVar, Callable, cast, overload
from datetime import date, datetime
-from typing_extensions import Self, Literal
+from typing_extensions import Self, Literal, TypedDict
import pydantic
from pydantic.fields import FieldInfo
@@ -137,6 +137,10 @@ def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str:
return model.model_dump_json(indent=indent)
+class _ModelDumpKwargs(TypedDict, total=False):
+ by_alias: bool
+
+
def model_dump(
model: pydantic.BaseModel,
*,
@@ -148,6 +152,9 @@ def model_dump(
by_alias: bool | None = None,
) -> dict[str, Any]:
if (not PYDANTIC_V1) or hasattr(model, "model_dump"):
+ kwargs: _ModelDumpKwargs = {}
+ if by_alias is not None:
+ kwargs["by_alias"] = by_alias
return model.model_dump(
mode=mode,
exclude=exclude,
@@ -155,7 +162,7 @@ def model_dump(
exclude_defaults=exclude_defaults,
# warnings are not supported in Pydantic v1
warnings=True if PYDANTIC_V1 else warnings,
- by_alias=by_alias,
+ **kwargs,
)
return cast(
"dict[str, Any]",
From 50ea4d7fd98a86726f6825d911507b7fc96e2e60 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Mon, 16 Mar 2026 19:02:14 +0000
Subject: [PATCH 08/26] fix(deps): bump minimum typing-extensions version
---
pyproject.toml | 2 +-
requirements-dev.lock | 25 +--
uv.lock | 401 +++++++++++++++++++++++-------------------
3 files changed, 232 insertions(+), 196 deletions(-)
diff --git a/pyproject.toml b/pyproject.toml
index 6d8cd1b8..1fd52862 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -9,7 +9,7 @@ authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
dependencies = [
"httpx>=0.23.0, <1",
"pydantic>=1.9.0, <3",
- "typing-extensions>=4.7, <5",
+ "typing-extensions>=4.14, <5",
"anyio>=3.5.0, <5",
"distro>=1.7.0, <2",
"sniffio",
diff --git a/requirements-dev.lock b/requirements-dev.lock
index 97191aae..bd51b81f 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -7,7 +7,7 @@ anyio==4.12.1
# via
# httpx
# llama-stack-client
-black==26.1.0
+black==26.3.1
certifi==2026.1.4
# via
# httpcore
@@ -15,7 +15,7 @@ certifi==2026.1.4
# requests
cfgv==3.5.0
# via pre-commit
-charset-normalizer==3.4.4
+charset-normalizer==3.4.6
# via requests
click==8.3.1
# via
@@ -33,8 +33,10 @@ distro==1.9.0
# via llama-stack-client
execnet==2.1.2
# via pytest-xdist
-filelock==3.20.3
- # via virtualenv
+filelock==3.25.2
+ # via
+ # python-discovery
+ # virtualenv
fire==0.7.1
# via llama-stack-client
h11==0.16.0
@@ -45,7 +47,7 @@ httpx==0.28.1
# via
# llama-stack-client
# respx
-identify==2.6.16
+identify==2.6.18
# via pre-commit
idna==3.11
# via
@@ -68,21 +70,22 @@ nodeenv==1.10.0
# via
# pre-commit
# pyright
-numpy==2.4.2
+numpy==2.4.3
# via pandas
packaging==25.0
# via
# black
# pytest
-pandas==3.0.0
+pandas==3.0.1
# via llama-stack-client
pathspec==1.0.3
# via
# black
# mypy
-platformdirs==4.5.1
+platformdirs==4.9.4
# via
# black
+ # python-discovery
# virtualenv
pluggy==1.6.0
# via pytest
@@ -108,13 +111,15 @@ pytest-asyncio==1.3.0
pytest-xdist==3.8.0
python-dateutil==2.9.0.post0
# via pandas
+python-discovery==1.2.1
+ # via virtualenv
pytokens==0.4.1
# via black
pyyaml==6.0.3
# via
# pre-commit
# pyaml
-requests==2.32.5
+requests==2.33.0
# via llama-stack-client
respx==0.22.0
rich==14.2.0
@@ -147,7 +152,7 @@ tzdata==2025.3 ; sys_platform == 'emscripten' or sys_platform == 'win32'
# via pandas
urllib3==2.6.3
# via requests
-virtualenv==20.36.1
+virtualenv==21.2.0
# via pre-commit
wcwidth==0.6.0
# via prompt-toolkit
diff --git a/uv.lock b/uv.lock
index 64e72181..857e7067 100644
--- a/uv.lock
+++ b/uv.lock
@@ -154,7 +154,7 @@ wheels = [
[[package]]
name = "black"
-version = "26.1.0"
+version = "26.3.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "click" },
@@ -164,24 +164,24 @@ dependencies = [
{ name = "platformdirs" },
{ name = "pytokens" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/13/88/560b11e521c522440af991d46848a2bde64b5f7202ec14e1f46f9509d328/black-26.1.0.tar.gz", hash = "sha256:d294ac3340eef9c9eb5d29288e96dc719ff269a88e27b396340459dd85da4c58", size = 658785, upload-time = "2026-01-18T04:50:11.993Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/e1/c5/61175d618685d42b005847464b8fb4743a67b1b8fdb75e50e5a96c31a27a/black-26.3.1.tar.gz", hash = "sha256:2c50f5063a9641c7eed7795014ba37b0f5fa227f3d408b968936e24bc0566b07", size = 666155, upload-time = "2026-03-12T03:36:03.593Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/f5/13/710298938a61f0f54cdb4d1c0baeb672c01ff0358712eddaf29f76d32a0b/black-26.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6eeca41e70b5f5c84f2f913af857cf2ce17410847e1d54642e658e078da6544f", size = 1878189, upload-time = "2026-01-18T04:59:30.682Z" },
- { url = "https://files.pythonhosted.org/packages/79/a6/5179beaa57e5dbd2ec9f1c64016214057b4265647c62125aa6aeffb05392/black-26.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:dd39eef053e58e60204f2cdf059e2442e2eb08f15989eefe259870f89614c8b6", size = 1700178, upload-time = "2026-01-18T04:59:32.387Z" },
- { url = "https://files.pythonhosted.org/packages/8c/04/c96f79d7b93e8f09d9298b333ca0d31cd9b2ee6c46c274fd0f531de9dc61/black-26.1.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9459ad0d6cd483eacad4c6566b0f8e42af5e8b583cee917d90ffaa3778420a0a", size = 1777029, upload-time = "2026-01-18T04:59:33.767Z" },
- { url = "https://files.pythonhosted.org/packages/49/f9/71c161c4c7aa18bdda3776b66ac2dc07aed62053c7c0ff8bbda8c2624fe2/black-26.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:a19915ec61f3a8746e8b10adbac4a577c6ba9851fa4a9e9fbfbcf319887a5791", size = 1406466, upload-time = "2026-01-18T04:59:35.177Z" },
- { url = "https://files.pythonhosted.org/packages/4a/8b/a7b0f974e473b159d0ac1b6bcefffeb6bec465898a516ee5cc989503cbc7/black-26.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:643d27fb5facc167c0b1b59d0315f2674a6e950341aed0fc05cf307d22bf4954", size = 1216393, upload-time = "2026-01-18T04:59:37.18Z" },
- { url = "https://files.pythonhosted.org/packages/79/04/fa2f4784f7237279332aa735cdfd5ae2e7730db0072fb2041dadda9ae551/black-26.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ba1d768fbfb6930fc93b0ecc32a43d8861ded16f47a40f14afa9bb04ab93d304", size = 1877781, upload-time = "2026-01-18T04:59:39.054Z" },
- { url = "https://files.pythonhosted.org/packages/cf/ad/5a131b01acc0e5336740a039628c0ab69d60cf09a2c87a4ec49f5826acda/black-26.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2b807c240b64609cb0e80d2200a35b23c7df82259f80bef1b2c96eb422b4aac9", size = 1699670, upload-time = "2026-01-18T04:59:41.005Z" },
- { url = "https://files.pythonhosted.org/packages/da/7c/b05f22964316a52ab6b4265bcd52c0ad2c30d7ca6bd3d0637e438fc32d6e/black-26.1.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1de0f7d01cc894066a1153b738145b194414cc6eeaad8ef4397ac9abacf40f6b", size = 1775212, upload-time = "2026-01-18T04:59:42.545Z" },
- { url = "https://files.pythonhosted.org/packages/a6/a3/e8d1526bea0446e040193185353920a9506eab60a7d8beb062029129c7d2/black-26.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:91a68ae46bf07868963671e4d05611b179c2313301bd756a89ad4e3b3db2325b", size = 1409953, upload-time = "2026-01-18T04:59:44.357Z" },
- { url = "https://files.pythonhosted.org/packages/c7/5a/d62ebf4d8f5e3a1daa54adaab94c107b57be1b1a2f115a0249b41931e188/black-26.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:be5e2fe860b9bd9edbf676d5b60a9282994c03fbbd40fe8f5e75d194f96064ca", size = 1217707, upload-time = "2026-01-18T04:59:45.719Z" },
- { url = "https://files.pythonhosted.org/packages/6a/83/be35a175aacfce4b05584ac415fd317dd6c24e93a0af2dcedce0f686f5d8/black-26.1.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:9dc8c71656a79ca49b8d3e2ce8103210c9481c57798b48deeb3a8bb02db5f115", size = 1871864, upload-time = "2026-01-18T04:59:47.586Z" },
- { url = "https://files.pythonhosted.org/packages/a5/f5/d33696c099450b1274d925a42b7a030cd3ea1f56d72e5ca8bbed5f52759c/black-26.1.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:b22b3810451abe359a964cc88121d57f7bce482b53a066de0f1584988ca36e79", size = 1701009, upload-time = "2026-01-18T04:59:49.443Z" },
- { url = "https://files.pythonhosted.org/packages/1b/87/670dd888c537acb53a863bc15abbd85b22b429237d9de1b77c0ed6b79c42/black-26.1.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:53c62883b3f999f14e5d30b5a79bd437236658ad45b2f853906c7cbe79de00af", size = 1767806, upload-time = "2026-01-18T04:59:50.769Z" },
- { url = "https://files.pythonhosted.org/packages/fe/9c/cd3deb79bfec5bcf30f9d2100ffeec63eecce826eb63e3961708b9431ff1/black-26.1.0-cp314-cp314-win_amd64.whl", hash = "sha256:f016baaadc423dc960cdddf9acae679e71ee02c4c341f78f3179d7e4819c095f", size = 1433217, upload-time = "2026-01-18T04:59:52.218Z" },
- { url = "https://files.pythonhosted.org/packages/4e/29/f3be41a1cf502a283506f40f5d27203249d181f7a1a2abce1c6ce188035a/black-26.1.0-cp314-cp314-win_arm64.whl", hash = "sha256:66912475200b67ef5a0ab665011964bf924745103f51977a78b4fb92a9fc1bf0", size = 1245773, upload-time = "2026-01-18T04:59:54.457Z" },
- { url = "https://files.pythonhosted.org/packages/e4/3d/51bdb3ecbfadfaf825ec0c75e1de6077422b4afa2091c6c9ba34fbfc0c2d/black-26.1.0-py3-none-any.whl", hash = "sha256:1054e8e47ebd686e078c0bb0eaf31e6ce69c966058d122f2c0c950311f9f3ede", size = 204010, upload-time = "2026-01-18T04:50:09.978Z" },
+ { url = "https://files.pythonhosted.org/packages/dc/f8/da5eae4fc75e78e6dceb60624e1b9662ab00d6b452996046dfa9b8a6025b/black-26.3.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b5e6f89631eb88a7302d416594a32faeee9fb8fb848290da9d0a5f2903519fc1", size = 1895920, upload-time = "2026-03-12T03:40:13.921Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/9f/04e6f26534da2e1629b2b48255c264cabf5eedc5141d04516d9d68a24111/black-26.3.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:41cd2012d35b47d589cb8a16faf8a32ef7a336f56356babd9fcf70939ad1897f", size = 1718499, upload-time = "2026-03-12T03:40:15.239Z" },
+ { url = "https://files.pythonhosted.org/packages/04/91/a5935b2a63e31b331060c4a9fdb5a6c725840858c599032a6f3aac94055f/black-26.3.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f76ff19ec5297dd8e66eb64deda23631e642c9393ab592826fd4bdc97a4bce7", size = 1794994, upload-time = "2026-03-12T03:40:17.124Z" },
+ { url = "https://files.pythonhosted.org/packages/e7/0a/86e462cdd311a3c2a8ece708d22aba17d0b2a0d5348ca34b40cdcbea512e/black-26.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:ddb113db38838eb9f043623ba274cfaf7d51d5b0c22ecb30afe58b1bb8322983", size = 1420867, upload-time = "2026-03-12T03:40:18.83Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/e5/22515a19cb7eaee3440325a6b0d95d2c0e88dd180cb011b12ae488e031d1/black-26.3.1-cp312-cp312-win_arm64.whl", hash = "sha256:dfdd51fc3e64ea4f35873d1b3fb25326773d55d2329ff8449139ebaad7357efb", size = 1230124, upload-time = "2026-03-12T03:40:20.425Z" },
+ { url = "https://files.pythonhosted.org/packages/f5/77/5728052a3c0450c53d9bb3945c4c46b91baa62b2cafab6801411b6271e45/black-26.3.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:855822d90f884905362f602880ed8b5df1b7e3ee7d0db2502d4388a954cc8c54", size = 1895034, upload-time = "2026-03-12T03:40:21.813Z" },
+ { url = "https://files.pythonhosted.org/packages/52/73/7cae55fdfdfbe9d19e9a8d25d145018965fe2079fa908101c3733b0c55a0/black-26.3.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:8a33d657f3276328ce00e4d37fe70361e1ec7614da5d7b6e78de5426cb56332f", size = 1718503, upload-time = "2026-03-12T03:40:23.666Z" },
+ { url = "https://files.pythonhosted.org/packages/e1/87/af89ad449e8254fdbc74654e6467e3c9381b61472cc532ee350d28cfdafb/black-26.3.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f1cd08e99d2f9317292a311dfe578fd2a24b15dbce97792f9c4d752275c1fa56", size = 1793557, upload-time = "2026-03-12T03:40:25.497Z" },
+ { url = "https://files.pythonhosted.org/packages/43/10/d6c06a791d8124b843bf325ab4ac7d2f5b98731dff84d6064eafd687ded1/black-26.3.1-cp313-cp313-win_amd64.whl", hash = "sha256:c7e72339f841b5a237ff14f7d3880ddd0fc7f98a1199e8c4327f9a4f478c1839", size = 1422766, upload-time = "2026-03-12T03:40:27.14Z" },
+ { url = "https://files.pythonhosted.org/packages/59/4f/40a582c015f2d841ac24fed6390bd68f0fc896069ff3a886317959c9daf8/black-26.3.1-cp313-cp313-win_arm64.whl", hash = "sha256:afc622538b430aa4c8c853f7f63bc582b3b8030fd8c80b70fb5fa5b834e575c2", size = 1232140, upload-time = "2026-03-12T03:40:28.882Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/da/e36e27c9cebc1311b7579210df6f1c86e50f2d7143ae4fcf8a5017dc8809/black-26.3.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:2d6bfaf7fd0993b420bed691f20f9492d53ce9a2bcccea4b797d34e947318a78", size = 1889234, upload-time = "2026-03-12T03:40:30.964Z" },
+ { url = "https://files.pythonhosted.org/packages/0e/7b/9871acf393f64a5fa33668c19350ca87177b181f44bb3d0c33b2d534f22c/black-26.3.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:f89f2ab047c76a9c03f78d0d66ca519e389519902fa27e7a91117ef7611c0568", size = 1720522, upload-time = "2026-03-12T03:40:32.346Z" },
+ { url = "https://files.pythonhosted.org/packages/03/87/e766c7f2e90c07fb7586cc787c9ae6462b1eedab390191f2b7fc7f6170a9/black-26.3.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b07fc0dab849d24a80a29cfab8d8a19187d1c4685d8a5e6385a5ce323c1f015f", size = 1787824, upload-time = "2026-03-12T03:40:33.636Z" },
+ { url = "https://files.pythonhosted.org/packages/ac/94/2424338fb2d1875e9e83eed4c8e9c67f6905ec25afd826a911aea2b02535/black-26.3.1-cp314-cp314-win_amd64.whl", hash = "sha256:0126ae5b7c09957da2bdbd91a9ba1207453feada9e9fe51992848658c6c8e01c", size = 1445855, upload-time = "2026-03-12T03:40:35.442Z" },
+ { url = "https://files.pythonhosted.org/packages/86/43/0c3338bd928afb8ee7471f1a4eec3bdbe2245ccb4a646092a222e8669840/black-26.3.1-cp314-cp314-win_arm64.whl", hash = "sha256:92c0ec1f2cc149551a2b7b47efc32c866406b6891b0ee4625e95967c8f4acfb1", size = 1258109, upload-time = "2026-03-12T03:40:36.832Z" },
+ { url = "https://files.pythonhosted.org/packages/8e/0d/52d98722666d6fc6c3dd4c76df339501d6efd40e0ff95e6186a7b7f0befd/black-26.3.1-py3-none-any.whl", hash = "sha256:2bd5aa94fc267d38bb21a70d7410a89f1a1d318841855f698746f8e7f51acd1b", size = 207542, upload-time = "2026-03-12T03:36:01.668Z" },
]
[[package]]
@@ -204,59 +204,75 @@ wheels = [
[[package]]
name = "charset-normalizer"
-version = "3.4.4"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/13/69/33ddede1939fdd074bce5434295f38fae7136463422fe4fd3e0e89b98062/charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a", size = 129418, upload-time = "2025-10-14T04:42:32.879Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/f3/85/1637cd4af66fa687396e757dec650f28025f2a2f5a5531a3208dc0ec43f2/charset_normalizer-3.4.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394", size = 208425, upload-time = "2025-10-14T04:40:53.353Z" },
- { url = "https://files.pythonhosted.org/packages/9d/6a/04130023fef2a0d9c62d0bae2649b69f7b7d8d24ea5536feef50551029df/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25", size = 148162, upload-time = "2025-10-14T04:40:54.558Z" },
- { url = "https://files.pythonhosted.org/packages/78/29/62328d79aa60da22c9e0b9a66539feae06ca0f5a4171ac4f7dc285b83688/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef", size = 144558, upload-time = "2025-10-14T04:40:55.677Z" },
- { url = "https://files.pythonhosted.org/packages/86/bb/b32194a4bf15b88403537c2e120b817c61cd4ecffa9b6876e941c3ee38fe/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d", size = 161497, upload-time = "2025-10-14T04:40:57.217Z" },
- { url = "https://files.pythonhosted.org/packages/19/89/a54c82b253d5b9b111dc74aca196ba5ccfcca8242d0fb64146d4d3183ff1/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8", size = 159240, upload-time = "2025-10-14T04:40:58.358Z" },
- { url = "https://files.pythonhosted.org/packages/c0/10/d20b513afe03acc89ec33948320a5544d31f21b05368436d580dec4e234d/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86", size = 153471, upload-time = "2025-10-14T04:40:59.468Z" },
- { url = "https://files.pythonhosted.org/packages/61/fa/fbf177b55bdd727010f9c0a3c49eefa1d10f960e5f09d1d887bf93c2e698/charset_normalizer-3.4.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a", size = 150864, upload-time = "2025-10-14T04:41:00.623Z" },
- { url = "https://files.pythonhosted.org/packages/05/12/9fbc6a4d39c0198adeebbde20b619790e9236557ca59fc40e0e3cebe6f40/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f", size = 150647, upload-time = "2025-10-14T04:41:01.754Z" },
- { url = "https://files.pythonhosted.org/packages/ad/1f/6a9a593d52e3e8c5d2b167daf8c6b968808efb57ef4c210acb907c365bc4/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc", size = 145110, upload-time = "2025-10-14T04:41:03.231Z" },
- { url = "https://files.pythonhosted.org/packages/30/42/9a52c609e72471b0fc54386dc63c3781a387bb4fe61c20231a4ebcd58bdd/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf", size = 162839, upload-time = "2025-10-14T04:41:04.715Z" },
- { url = "https://files.pythonhosted.org/packages/c4/5b/c0682bbf9f11597073052628ddd38344a3d673fda35a36773f7d19344b23/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15", size = 150667, upload-time = "2025-10-14T04:41:05.827Z" },
- { url = "https://files.pythonhosted.org/packages/e4/24/a41afeab6f990cf2daf6cb8c67419b63b48cf518e4f56022230840c9bfb2/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9", size = 160535, upload-time = "2025-10-14T04:41:06.938Z" },
- { url = "https://files.pythonhosted.org/packages/2a/e5/6a4ce77ed243c4a50a1fecca6aaaab419628c818a49434be428fe24c9957/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0", size = 154816, upload-time = "2025-10-14T04:41:08.101Z" },
- { url = "https://files.pythonhosted.org/packages/a8/ef/89297262b8092b312d29cdb2517cb1237e51db8ecef2e9af5edbe7b683b1/charset_normalizer-3.4.4-cp312-cp312-win32.whl", hash = "sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26", size = 99694, upload-time = "2025-10-14T04:41:09.23Z" },
- { url = "https://files.pythonhosted.org/packages/3d/2d/1e5ed9dd3b3803994c155cd9aacb60c82c331bad84daf75bcb9c91b3295e/charset_normalizer-3.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525", size = 107131, upload-time = "2025-10-14T04:41:10.467Z" },
- { url = "https://files.pythonhosted.org/packages/d0/d9/0ed4c7098a861482a7b6a95603edce4c0d9db2311af23da1fb2b75ec26fc/charset_normalizer-3.4.4-cp312-cp312-win_arm64.whl", hash = "sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3", size = 100390, upload-time = "2025-10-14T04:41:11.915Z" },
- { url = "https://files.pythonhosted.org/packages/97/45/4b3a1239bbacd321068ea6e7ac28875b03ab8bc0aa0966452db17cd36714/charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794", size = 208091, upload-time = "2025-10-14T04:41:13.346Z" },
- { url = "https://files.pythonhosted.org/packages/7d/62/73a6d7450829655a35bb88a88fca7d736f9882a27eacdca2c6d505b57e2e/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed", size = 147936, upload-time = "2025-10-14T04:41:14.461Z" },
- { url = "https://files.pythonhosted.org/packages/89/c5/adb8c8b3d6625bef6d88b251bbb0d95f8205831b987631ab0c8bb5d937c2/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72", size = 144180, upload-time = "2025-10-14T04:41:15.588Z" },
- { url = "https://files.pythonhosted.org/packages/91/ed/9706e4070682d1cc219050b6048bfd293ccf67b3d4f5a4f39207453d4b99/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328", size = 161346, upload-time = "2025-10-14T04:41:16.738Z" },
- { url = "https://files.pythonhosted.org/packages/d5/0d/031f0d95e4972901a2f6f09ef055751805ff541511dc1252ba3ca1f80cf5/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede", size = 158874, upload-time = "2025-10-14T04:41:17.923Z" },
- { url = "https://files.pythonhosted.org/packages/f5/83/6ab5883f57c9c801ce5e5677242328aa45592be8a00644310a008d04f922/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894", size = 153076, upload-time = "2025-10-14T04:41:19.106Z" },
- { url = "https://files.pythonhosted.org/packages/75/1e/5ff781ddf5260e387d6419959ee89ef13878229732732ee73cdae01800f2/charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1", size = 150601, upload-time = "2025-10-14T04:41:20.245Z" },
- { url = "https://files.pythonhosted.org/packages/d7/57/71be810965493d3510a6ca79b90c19e48696fb1ff964da319334b12677f0/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490", size = 150376, upload-time = "2025-10-14T04:41:21.398Z" },
- { url = "https://files.pythonhosted.org/packages/e5/d5/c3d057a78c181d007014feb7e9f2e65905a6c4ef182c0ddf0de2924edd65/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44", size = 144825, upload-time = "2025-10-14T04:41:22.583Z" },
- { url = "https://files.pythonhosted.org/packages/e6/8c/d0406294828d4976f275ffbe66f00266c4b3136b7506941d87c00cab5272/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133", size = 162583, upload-time = "2025-10-14T04:41:23.754Z" },
- { url = "https://files.pythonhosted.org/packages/d7/24/e2aa1f18c8f15c4c0e932d9287b8609dd30ad56dbe41d926bd846e22fb8d/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3", size = 150366, upload-time = "2025-10-14T04:41:25.27Z" },
- { url = "https://files.pythonhosted.org/packages/e4/5b/1e6160c7739aad1e2df054300cc618b06bf784a7a164b0f238360721ab86/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e", size = 160300, upload-time = "2025-10-14T04:41:26.725Z" },
- { url = "https://files.pythonhosted.org/packages/7a/10/f882167cd207fbdd743e55534d5d9620e095089d176d55cb22d5322f2afd/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc", size = 154465, upload-time = "2025-10-14T04:41:28.322Z" },
- { url = "https://files.pythonhosted.org/packages/89/66/c7a9e1b7429be72123441bfdbaf2bc13faab3f90b933f664db506dea5915/charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac", size = 99404, upload-time = "2025-10-14T04:41:29.95Z" },
- { url = "https://files.pythonhosted.org/packages/c4/26/b9924fa27db384bdcd97ab83b4f0a8058d96ad9626ead570674d5e737d90/charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14", size = 107092, upload-time = "2025-10-14T04:41:31.188Z" },
- { url = "https://files.pythonhosted.org/packages/af/8f/3ed4bfa0c0c72a7ca17f0380cd9e4dd842b09f664e780c13cff1dcf2ef1b/charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2", size = 100408, upload-time = "2025-10-14T04:41:32.624Z" },
- { url = "https://files.pythonhosted.org/packages/2a/35/7051599bd493e62411d6ede36fd5af83a38f37c4767b92884df7301db25d/charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd", size = 207746, upload-time = "2025-10-14T04:41:33.773Z" },
- { url = "https://files.pythonhosted.org/packages/10/9a/97c8d48ef10d6cd4fcead2415523221624bf58bcf68a802721a6bc807c8f/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb", size = 147889, upload-time = "2025-10-14T04:41:34.897Z" },
- { url = "https://files.pythonhosted.org/packages/10/bf/979224a919a1b606c82bd2c5fa49b5c6d5727aa47b4312bb27b1734f53cd/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e", size = 143641, upload-time = "2025-10-14T04:41:36.116Z" },
- { url = "https://files.pythonhosted.org/packages/ba/33/0ad65587441fc730dc7bd90e9716b30b4702dc7b617e6ba4997dc8651495/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14", size = 160779, upload-time = "2025-10-14T04:41:37.229Z" },
- { url = "https://files.pythonhosted.org/packages/67/ed/331d6b249259ee71ddea93f6f2f0a56cfebd46938bde6fcc6f7b9a3d0e09/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191", size = 159035, upload-time = "2025-10-14T04:41:38.368Z" },
- { url = "https://files.pythonhosted.org/packages/67/ff/f6b948ca32e4f2a4576aa129d8bed61f2e0543bf9f5f2b7fc3758ed005c9/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838", size = 152542, upload-time = "2025-10-14T04:41:39.862Z" },
- { url = "https://files.pythonhosted.org/packages/16/85/276033dcbcc369eb176594de22728541a925b2632f9716428c851b149e83/charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6", size = 149524, upload-time = "2025-10-14T04:41:41.319Z" },
- { url = "https://files.pythonhosted.org/packages/9e/f2/6a2a1f722b6aba37050e626530a46a68f74e63683947a8acff92569f979a/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e", size = 150395, upload-time = "2025-10-14T04:41:42.539Z" },
- { url = "https://files.pythonhosted.org/packages/60/bb/2186cb2f2bbaea6338cad15ce23a67f9b0672929744381e28b0592676824/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c", size = 143680, upload-time = "2025-10-14T04:41:43.661Z" },
- { url = "https://files.pythonhosted.org/packages/7d/a5/bf6f13b772fbb2a90360eb620d52ed8f796f3c5caee8398c3b2eb7b1c60d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090", size = 162045, upload-time = "2025-10-14T04:41:44.821Z" },
- { url = "https://files.pythonhosted.org/packages/df/c5/d1be898bf0dc3ef9030c3825e5d3b83f2c528d207d246cbabe245966808d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152", size = 149687, upload-time = "2025-10-14T04:41:46.442Z" },
- { url = "https://files.pythonhosted.org/packages/a5/42/90c1f7b9341eef50c8a1cb3f098ac43b0508413f33affd762855f67a410e/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828", size = 160014, upload-time = "2025-10-14T04:41:47.631Z" },
- { url = "https://files.pythonhosted.org/packages/76/be/4d3ee471e8145d12795ab655ece37baed0929462a86e72372fd25859047c/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec", size = 154044, upload-time = "2025-10-14T04:41:48.81Z" },
- { url = "https://files.pythonhosted.org/packages/b0/6f/8f7af07237c34a1defe7defc565a9bc1807762f672c0fde711a4b22bf9c0/charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9", size = 99940, upload-time = "2025-10-14T04:41:49.946Z" },
- { url = "https://files.pythonhosted.org/packages/4b/51/8ade005e5ca5b0d80fb4aff72a3775b325bdc3d27408c8113811a7cbe640/charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c", size = 107104, upload-time = "2025-10-14T04:41:51.051Z" },
- { url = "https://files.pythonhosted.org/packages/da/5f/6b8f83a55bb8278772c5ae54a577f3099025f9ade59d0136ac24a0df4bde/charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2", size = 100743, upload-time = "2025-10-14T04:41:52.122Z" },
- { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" },
+version = "3.4.6"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/7b/60/e3bec1881450851b087e301bedc3daa9377a4d45f1c26aa90b0b235e38aa/charset_normalizer-3.4.6.tar.gz", hash = "sha256:1ae6b62897110aa7c79ea2f5dd38d1abca6db663687c0b1ad9aed6f6bae3d9d6", size = 143363, upload-time = "2026-03-15T18:53:25.478Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e5/62/c0815c992c9545347aeea7859b50dc9044d147e2e7278329c6e02ac9a616/charset_normalizer-3.4.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:2ef7fedc7a6ecbe99969cd09632516738a97eeb8bd7258bf8a0f23114c057dab", size = 295154, upload-time = "2026-03-15T18:50:50.88Z" },
+ { url = "https://files.pythonhosted.org/packages/a8/37/bdca6613c2e3c58c7421891d80cc3efa1d32e882f7c4a7ee6039c3fc951a/charset_normalizer-3.4.6-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a4ea868bc28109052790eb2b52a9ab33f3aa7adc02f96673526ff47419490e21", size = 199191, upload-time = "2026-03-15T18:50:52.658Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/92/9934d1bbd69f7f398b38c5dae1cbf9cc672e7c34a4adf7b17c0a9c17d15d/charset_normalizer-3.4.6-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:836ab36280f21fc1a03c99cd05c6b7af70d2697e374c7af0b61ed271401a72a2", size = 218674, upload-time = "2026-03-15T18:50:54.102Z" },
+ { url = "https://files.pythonhosted.org/packages/af/90/25f6ab406659286be929fd89ab0e78e38aa183fc374e03aa3c12d730af8a/charset_normalizer-3.4.6-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f1ce721c8a7dfec21fcbdfe04e8f68174183cf4e8188e0645e92aa23985c57ff", size = 215259, upload-time = "2026-03-15T18:50:55.616Z" },
+ { url = "https://files.pythonhosted.org/packages/4e/ef/79a463eb0fff7f96afa04c1d4c51f8fc85426f918db467854bfb6a569ce3/charset_normalizer-3.4.6-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e28d62a8fc7a1fa411c43bd65e346f3bce9716dc51b897fbe930c5987b402d5", size = 207276, upload-time = "2026-03-15T18:50:57.054Z" },
+ { url = "https://files.pythonhosted.org/packages/f7/72/d0426afec4b71dc159fa6b4e68f868cd5a3ecd918fec5813a15d292a7d10/charset_normalizer-3.4.6-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:530d548084c4a9f7a16ed4a294d459b4f229db50df689bfe92027452452943a0", size = 195161, upload-time = "2026-03-15T18:50:58.686Z" },
+ { url = "https://files.pythonhosted.org/packages/bf/18/c82b06a68bfcb6ce55e508225d210c7e6a4ea122bfc0748892f3dc4e8e11/charset_normalizer-3.4.6-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:30f445ae60aad5e1f8bdbb3108e39f6fbc09f4ea16c815c66578878325f8f15a", size = 203452, upload-time = "2026-03-15T18:51:00.196Z" },
+ { url = "https://files.pythonhosted.org/packages/44/d6/0c25979b92f8adafdbb946160348d8d44aa60ce99afdc27df524379875cb/charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ac2393c73378fea4e52aa56285a3d64be50f1a12395afef9cce47772f60334c2", size = 202272, upload-time = "2026-03-15T18:51:01.703Z" },
+ { url = "https://files.pythonhosted.org/packages/2e/3d/7fea3e8fe84136bebbac715dd1221cc25c173c57a699c030ab9b8900cbb7/charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:90ca27cd8da8118b18a52d5f547859cc1f8354a00cd1e8e5120df3e30d6279e5", size = 195622, upload-time = "2026-03-15T18:51:03.526Z" },
+ { url = "https://files.pythonhosted.org/packages/57/8a/d6f7fd5cb96c58ef2f681424fbca01264461336d2a7fc875e4446b1f1346/charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8e5a94886bedca0f9b78fecd6afb6629142fd2605aa70a125d49f4edc6037ee6", size = 220056, upload-time = "2026-03-15T18:51:05.269Z" },
+ { url = "https://files.pythonhosted.org/packages/16/50/478cdda782c8c9c3fb5da3cc72dd7f331f031e7f1363a893cdd6ca0f8de0/charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:695f5c2823691a25f17bc5d5ffe79fa90972cc34b002ac6c843bb8a1720e950d", size = 203751, upload-time = "2026-03-15T18:51:06.858Z" },
+ { url = "https://files.pythonhosted.org/packages/75/fc/cc2fcac943939c8e4d8791abfa139f685e5150cae9f94b60f12520feaa9b/charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:231d4da14bcd9301310faf492051bee27df11f2bc7549bc0bb41fef11b82daa2", size = 216563, upload-time = "2026-03-15T18:51:08.564Z" },
+ { url = "https://files.pythonhosted.org/packages/a8/b7/a4add1d9a5f68f3d037261aecca83abdb0ab15960a3591d340e829b37298/charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a056d1ad2633548ca18ffa2f85c202cfb48b68615129143915b8dc72a806a923", size = 209265, upload-time = "2026-03-15T18:51:10.312Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/18/c094561b5d64a24277707698e54b7f67bd17a4f857bbfbb1072bba07c8bf/charset_normalizer-3.4.6-cp312-cp312-win32.whl", hash = "sha256:c2274ca724536f173122f36c98ce188fd24ce3dad886ec2b7af859518ce008a4", size = 144229, upload-time = "2026-03-15T18:51:11.694Z" },
+ { url = "https://files.pythonhosted.org/packages/ab/20/0567efb3a8fd481b8f34f739ebddc098ed062a59fed41a8d193a61939e8f/charset_normalizer-3.4.6-cp312-cp312-win_amd64.whl", hash = "sha256:c8ae56368f8cc97c7e40a7ee18e1cedaf8e780cd8bc5ed5ac8b81f238614facb", size = 154277, upload-time = "2026-03-15T18:51:13.004Z" },
+ { url = "https://files.pythonhosted.org/packages/15/57/28d79b44b51933119e21f65479d0864a8d5893e494cf5daab15df0247c17/charset_normalizer-3.4.6-cp312-cp312-win_arm64.whl", hash = "sha256:899d28f422116b08be5118ef350c292b36fc15ec2daeb9ea987c89281c7bb5c4", size = 142817, upload-time = "2026-03-15T18:51:14.408Z" },
+ { url = "https://files.pythonhosted.org/packages/1e/1d/4fdabeef4e231153b6ed7567602f3b68265ec4e5b76d6024cf647d43d981/charset_normalizer-3.4.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:11afb56037cbc4b1555a34dd69151e8e069bee82e613a73bef6e714ce733585f", size = 294823, upload-time = "2026-03-15T18:51:15.755Z" },
+ { url = "https://files.pythonhosted.org/packages/47/7b/20e809b89c69d37be748d98e84dce6820bf663cf19cf6b942c951a3e8f41/charset_normalizer-3.4.6-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:423fb7e748a08f854a08a222b983f4df1912b1daedce51a72bd24fe8f26a1843", size = 198527, upload-time = "2026-03-15T18:51:17.177Z" },
+ { url = "https://files.pythonhosted.org/packages/37/a6/4f8d27527d59c039dce6f7622593cdcd3d70a8504d87d09eb11e9fdc6062/charset_normalizer-3.4.6-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:d73beaac5e90173ac3deb9928a74763a6d230f494e4bfb422c217a0ad8e629bf", size = 218388, upload-time = "2026-03-15T18:51:18.934Z" },
+ { url = "https://files.pythonhosted.org/packages/f6/9b/4770ccb3e491a9bacf1c46cc8b812214fe367c86a96353ccc6daf87b01ec/charset_normalizer-3.4.6-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d60377dce4511655582e300dc1e5a5f24ba0cb229005a1d5c8d0cb72bb758ab8", size = 214563, upload-time = "2026-03-15T18:51:20.374Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/58/a199d245894b12db0b957d627516c78e055adc3a0d978bc7f65ddaf7c399/charset_normalizer-3.4.6-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:530e8cebeea0d76bdcf93357aa5e41336f48c3dc709ac52da2bb167c5b8271d9", size = 206587, upload-time = "2026-03-15T18:51:21.807Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/70/3def227f1ec56f5c69dfc8392b8bd63b11a18ca8178d9211d7cc5e5e4f27/charset_normalizer-3.4.6-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:a26611d9987b230566f24a0a125f17fe0de6a6aff9f25c9f564aaa2721a5fb88", size = 194724, upload-time = "2026-03-15T18:51:23.508Z" },
+ { url = "https://files.pythonhosted.org/packages/58/ab/9318352e220c05efd31c2779a23b50969dc94b985a2efa643ed9077bfca5/charset_normalizer-3.4.6-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:34315ff4fc374b285ad7f4a0bf7dcbfe769e1b104230d40f49f700d4ab6bbd84", size = 202956, upload-time = "2026-03-15T18:51:25.239Z" },
+ { url = "https://files.pythonhosted.org/packages/75/13/f3550a3ac25b70f87ac98c40d3199a8503676c2f1620efbf8d42095cfc40/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5f8ddd609f9e1af8c7bd6e2aca279c931aefecd148a14402d4e368f3171769fd", size = 201923, upload-time = "2026-03-15T18:51:26.682Z" },
+ { url = "https://files.pythonhosted.org/packages/1b/db/c5c643b912740b45e8eec21de1bbab8e7fc085944d37e1e709d3dcd9d72f/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:80d0a5615143c0b3225e5e3ef22c8d5d51f3f72ce0ea6fb84c943546c7b25b6c", size = 195366, upload-time = "2026-03-15T18:51:28.129Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/67/3b1c62744f9b2448443e0eb160d8b001c849ec3fef591e012eda6484787c/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:92734d4d8d187a354a556626c221cd1a892a4e0802ccb2af432a1d85ec012194", size = 219752, upload-time = "2026-03-15T18:51:29.556Z" },
+ { url = "https://files.pythonhosted.org/packages/f6/98/32ffbaf7f0366ffb0445930b87d103f6b406bc2c271563644bde8a2b1093/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:613f19aa6e082cf96e17e3ffd89383343d0d589abda756b7764cf78361fd41dc", size = 203296, upload-time = "2026-03-15T18:51:30.921Z" },
+ { url = "https://files.pythonhosted.org/packages/41/12/5d308c1bbe60cabb0c5ef511574a647067e2a1f631bc8634fcafaccd8293/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:2b1a63e8224e401cafe7739f77efd3f9e7f5f2026bda4aead8e59afab537784f", size = 215956, upload-time = "2026-03-15T18:51:32.399Z" },
+ { url = "https://files.pythonhosted.org/packages/53/e9/5f85f6c5e20669dbe56b165c67b0260547dea97dba7e187938833d791687/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6cceb5473417d28edd20c6c984ab6fee6c6267d38d906823ebfe20b03d607dc2", size = 208652, upload-time = "2026-03-15T18:51:34.214Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/11/897052ea6af56df3eef3ca94edafee410ca699ca0c7b87960ad19932c55e/charset_normalizer-3.4.6-cp313-cp313-win32.whl", hash = "sha256:d7de2637729c67d67cf87614b566626057e95c303bc0a55ffe391f5205e7003d", size = 143940, upload-time = "2026-03-15T18:51:36.15Z" },
+ { url = "https://files.pythonhosted.org/packages/a1/5c/724b6b363603e419829f561c854b87ed7c7e31231a7908708ac086cdf3e2/charset_normalizer-3.4.6-cp313-cp313-win_amd64.whl", hash = "sha256:572d7c822caf521f0525ba1bce1a622a0b85cf47ffbdae6c9c19e3b5ac3c4389", size = 154101, upload-time = "2026-03-15T18:51:37.876Z" },
+ { url = "https://files.pythonhosted.org/packages/01/a5/7abf15b4c0968e47020f9ca0935fb3274deb87cb288cd187cad92e8cdffd/charset_normalizer-3.4.6-cp313-cp313-win_arm64.whl", hash = "sha256:a4474d924a47185a06411e0064b803c68be044be2d60e50e8bddcc2649957c1f", size = 143109, upload-time = "2026-03-15T18:51:39.565Z" },
+ { url = "https://files.pythonhosted.org/packages/25/6f/ffe1e1259f384594063ea1869bfb6be5cdb8bc81020fc36c3636bc8302a1/charset_normalizer-3.4.6-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:9cc6e6d9e571d2f863fa77700701dae73ed5f78881efc8b3f9a4398772ff53e8", size = 294458, upload-time = "2026-03-15T18:51:41.134Z" },
+ { url = "https://files.pythonhosted.org/packages/56/60/09bb6c13a8c1016c2ed5c6a6488e4ffef506461aa5161662bd7636936fb1/charset_normalizer-3.4.6-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef5960d965e67165d75b7c7ffc60a83ec5abfc5c11b764ec13ea54fbef8b4421", size = 199277, upload-time = "2026-03-15T18:51:42.953Z" },
+ { url = "https://files.pythonhosted.org/packages/00/50/dcfbb72a5138bbefdc3332e8d81a23494bf67998b4b100703fd15fa52d81/charset_normalizer-3.4.6-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b3694e3f87f8ac7ce279d4355645b3c878d24d1424581b46282f24b92f5a4ae2", size = 218758, upload-time = "2026-03-15T18:51:44.339Z" },
+ { url = "https://files.pythonhosted.org/packages/03/b3/d79a9a191bb75f5aa81f3aaaa387ef29ce7cb7a9e5074ba8ea095cc073c2/charset_normalizer-3.4.6-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5d11595abf8dd942a77883a39d81433739b287b6aa71620f15164f8096221b30", size = 215299, upload-time = "2026-03-15T18:51:45.871Z" },
+ { url = "https://files.pythonhosted.org/packages/76/7e/bc8911719f7084f72fd545f647601ea3532363927f807d296a8c88a62c0d/charset_normalizer-3.4.6-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7bda6eebafd42133efdca535b04ccb338ab29467b3f7bf79569883676fc628db", size = 206811, upload-time = "2026-03-15T18:51:47.308Z" },
+ { url = "https://files.pythonhosted.org/packages/e2/40/c430b969d41dda0c465aa36cc7c2c068afb67177bef50905ac371b28ccc7/charset_normalizer-3.4.6-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:bbc8c8650c6e51041ad1be191742b8b421d05bbd3410f43fa2a00c8db87678e8", size = 193706, upload-time = "2026-03-15T18:51:48.849Z" },
+ { url = "https://files.pythonhosted.org/packages/48/15/e35e0590af254f7df984de1323640ef375df5761f615b6225ba8deb9799a/charset_normalizer-3.4.6-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:22c6f0c2fbc31e76c3b8a86fba1a56eda6166e238c29cdd3d14befdb4a4e4815", size = 202706, upload-time = "2026-03-15T18:51:50.257Z" },
+ { url = "https://files.pythonhosted.org/packages/5e/bd/f736f7b9cc5e93a18b794a50346bb16fbfd6b37f99e8f306f7951d27c17c/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7edbed096e4a4798710ed6bc75dcaa2a21b68b6c356553ac4823c3658d53743a", size = 202497, upload-time = "2026-03-15T18:51:52.012Z" },
+ { url = "https://files.pythonhosted.org/packages/9d/ba/2cc9e3e7dfdf7760a6ed8da7446d22536f3d0ce114ac63dee2a5a3599e62/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:7f9019c9cb613f084481bd6a100b12e1547cf2efe362d873c2e31e4035a6fa43", size = 193511, upload-time = "2026-03-15T18:51:53.723Z" },
+ { url = "https://files.pythonhosted.org/packages/9e/cb/5be49b5f776e5613be07298c80e1b02a2d900f7a7de807230595c85a8b2e/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:58c948d0d086229efc484fe2f30c2d382c86720f55cd9bc33591774348ad44e0", size = 220133, upload-time = "2026-03-15T18:51:55.333Z" },
+ { url = "https://files.pythonhosted.org/packages/83/43/99f1b5dad345accb322c80c7821071554f791a95ee50c1c90041c157ae99/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:419a9d91bd238052642a51938af8ac05da5b3343becde08d5cdeab9046df9ee1", size = 203035, upload-time = "2026-03-15T18:51:56.736Z" },
+ { url = "https://files.pythonhosted.org/packages/87/9a/62c2cb6a531483b55dddff1a68b3d891a8b498f3ca555fbcf2978e804d9d/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5273b9f0b5835ff0350c0828faea623c68bfa65b792720c453e22b25cc72930f", size = 216321, upload-time = "2026-03-15T18:51:58.17Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/79/94a010ff81e3aec7c293eb82c28f930918e517bc144c9906a060844462eb/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:0e901eb1049fdb80f5bd11ed5ea1e498ec423102f7a9b9e4645d5b8204ff2815", size = 208973, upload-time = "2026-03-15T18:51:59.998Z" },
+ { url = "https://files.pythonhosted.org/packages/2a/57/4ecff6d4ec8585342f0c71bc03efaa99cb7468f7c91a57b105bcd561cea8/charset_normalizer-3.4.6-cp314-cp314-win32.whl", hash = "sha256:b4ff1d35e8c5bd078be89349b6f3a845128e685e751b6ea1169cf2160b344c4d", size = 144610, upload-time = "2026-03-15T18:52:02.213Z" },
+ { url = "https://files.pythonhosted.org/packages/80/94/8434a02d9d7f168c25767c64671fead8d599744a05d6a6c877144c754246/charset_normalizer-3.4.6-cp314-cp314-win_amd64.whl", hash = "sha256:74119174722c4349af9708993118581686f343adc1c8c9c007d59be90d077f3f", size = 154962, upload-time = "2026-03-15T18:52:03.658Z" },
+ { url = "https://files.pythonhosted.org/packages/46/4c/48f2cdbfd923026503dfd67ccea45c94fd8fe988d9056b468579c66ed62b/charset_normalizer-3.4.6-cp314-cp314-win_arm64.whl", hash = "sha256:e5bcc1a1ae744e0bb59641171ae53743760130600da8db48cbb6e4918e186e4e", size = 143595, upload-time = "2026-03-15T18:52:05.123Z" },
+ { url = "https://files.pythonhosted.org/packages/31/93/8878be7569f87b14f1d52032946131bcb6ebbd8af3e20446bc04053dc3f1/charset_normalizer-3.4.6-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:ad8faf8df23f0378c6d527d8b0b15ea4a2e23c89376877c598c4870d1b2c7866", size = 314828, upload-time = "2026-03-15T18:52:06.831Z" },
+ { url = "https://files.pythonhosted.org/packages/06/b6/fae511ca98aac69ecc35cde828b0a3d146325dd03d99655ad38fc2cc3293/charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f5ea69428fa1b49573eef0cc44a1d43bebd45ad0c611eb7d7eac760c7ae771bc", size = 208138, upload-time = "2026-03-15T18:52:08.239Z" },
+ { url = "https://files.pythonhosted.org/packages/54/57/64caf6e1bf07274a1e0b7c160a55ee9e8c9ec32c46846ce59b9c333f7008/charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:06a7e86163334edfc5d20fe104db92fcd666e5a5df0977cb5680a506fe26cc8e", size = 224679, upload-time = "2026-03-15T18:52:10.043Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/cb/9ff5a25b9273ef160861b41f6937f86fae18b0792fe0a8e75e06acb08f1d/charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e1f6e2f00a6b8edb562826e4632e26d063ac10307e80f7461f7de3ad8ef3f077", size = 223475, upload-time = "2026-03-15T18:52:11.854Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/97/440635fc093b8d7347502a377031f9605a1039c958f3cd18dcacffb37743/charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:95b52c68d64c1878818687a473a10547b3292e82b6f6fe483808fb1468e2f52f", size = 215230, upload-time = "2026-03-15T18:52:13.325Z" },
+ { url = "https://files.pythonhosted.org/packages/cd/24/afff630feb571a13f07c8539fbb502d2ab494019492aaffc78ef41f1d1d0/charset_normalizer-3.4.6-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:7504e9b7dc05f99a9bbb4525c67a2c155073b44d720470a148b34166a69c054e", size = 199045, upload-time = "2026-03-15T18:52:14.752Z" },
+ { url = "https://files.pythonhosted.org/packages/e5/17/d1399ecdaf7e0498c327433e7eefdd862b41236a7e484355b8e0e5ebd64b/charset_normalizer-3.4.6-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:172985e4ff804a7ad08eebec0a1640ece87ba5041d565fff23c8f99c1f389484", size = 211658, upload-time = "2026-03-15T18:52:16.278Z" },
+ { url = "https://files.pythonhosted.org/packages/b5/38/16baa0affb957b3d880e5ac2144caf3f9d7de7bc4a91842e447fbb5e8b67/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:4be9f4830ba8741527693848403e2c457c16e499100963ec711b1c6f2049b7c7", size = 210769, upload-time = "2026-03-15T18:52:17.782Z" },
+ { url = "https://files.pythonhosted.org/packages/05/34/c531bc6ac4c21da9ddfddb3107be2287188b3ea4b53b70fc58f2a77ac8d8/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:79090741d842f564b1b2827c0b82d846405b744d31e84f18d7a7b41c20e473ff", size = 201328, upload-time = "2026-03-15T18:52:19.553Z" },
+ { url = "https://files.pythonhosted.org/packages/fa/73/a5a1e9ca5f234519c1953608a03fe109c306b97fdfb25f09182babad51a7/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:87725cfb1a4f1f8c2fc9890ae2f42094120f4b44db9360be5d99a4c6b0e03a9e", size = 225302, upload-time = "2026-03-15T18:52:21.043Z" },
+ { url = "https://files.pythonhosted.org/packages/ba/f6/cd782923d112d296294dea4bcc7af5a7ae0f86ab79f8fefbda5526b6cfc0/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:fcce033e4021347d80ed9c66dcf1e7b1546319834b74445f561d2e2221de5659", size = 211127, upload-time = "2026-03-15T18:52:22.491Z" },
+ { url = "https://files.pythonhosted.org/packages/0e/c5/0b6898950627af7d6103a449b22320372c24c6feda91aa24e201a478d161/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:ca0276464d148c72defa8bb4390cce01b4a0e425f3b50d1435aa6d7a18107602", size = 222840, upload-time = "2026-03-15T18:52:24.113Z" },
+ { url = "https://files.pythonhosted.org/packages/7d/25/c4bba773bef442cbdc06111d40daa3de5050a676fa26e85090fc54dd12f0/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:197c1a244a274bb016dd8b79204850144ef77fe81c5b797dc389327adb552407", size = 216890, upload-time = "2026-03-15T18:52:25.541Z" },
+ { url = "https://files.pythonhosted.org/packages/35/1a/05dacadb0978da72ee287b0143097db12f2e7e8d3ffc4647da07a383b0b7/charset_normalizer-3.4.6-cp314-cp314t-win32.whl", hash = "sha256:2a24157fa36980478dd1770b585c0f30d19e18f4fb0c47c13aa568f871718579", size = 155379, upload-time = "2026-03-15T18:52:27.05Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/7a/d269d834cb3a76291651256f3b9a5945e81d0a49ab9f4a498964e83c0416/charset_normalizer-3.4.6-cp314-cp314t-win_amd64.whl", hash = "sha256:cd5e2801c89992ed8c0a3f0293ae83c159a60d9a5d685005383ef4caca77f2c4", size = 169043, upload-time = "2026-03-15T18:52:28.502Z" },
+ { url = "https://files.pythonhosted.org/packages/23/06/28b29fba521a37a8932c6a84192175c34d49f84a6d4773fa63d05f9aff22/charset_normalizer-3.4.6-cp314-cp314t-win_arm64.whl", hash = "sha256:47955475ac79cc504ef2704b192364e51d0d473ad452caedd0002605f780101c", size = 148523, upload-time = "2026-03-15T18:52:29.956Z" },
+ { url = "https://files.pythonhosted.org/packages/2a/68/687187c7e26cb24ccbd88e5069f5ef00eba804d36dde11d99aad0838ab45/charset_normalizer-3.4.6-py3-none-any.whl", hash = "sha256:947cf925bc916d90adba35a64c82aace04fa39b46b52d4630ece166655905a69", size = 61455, upload-time = "2026-03-15T18:53:23.833Z" },
]
[[package]]
@@ -318,11 +334,11 @@ wheels = [
[[package]]
name = "filelock"
-version = "3.20.3"
+version = "3.25.2"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/1d/65/ce7f1b70157833bf3cb851b556a37d4547ceafc158aa9b34b36782f23696/filelock-3.20.3.tar.gz", hash = "sha256:18c57ee915c7ec61cff0ecf7f0f869936c7c30191bb0cf406f1341778d0834e1", size = 19485, upload-time = "2026-01-09T17:55:05.421Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/94/b8/00651a0f559862f3bb7d6f7477b192afe3f583cc5e26403b44e59a55ab34/filelock-3.25.2.tar.gz", hash = "sha256:b64ece2b38f4ca29dd3e810287aa8c48182bbecd1ae6e9ae126c9b35f1382694", size = 40480, upload-time = "2026-03-11T20:45:38.487Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/b5/36/7fb70f04bf00bc646cd5bb45aa9eddb15e19437a28b8fb2b4a5249fac770/filelock-3.20.3-py3-none-any.whl", hash = "sha256:4b0dda527ee31078689fc205ec4f1c1bf7d56cf88b6dc9426c4f230e46c2dce1", size = 16701, upload-time = "2026-01-09T17:55:04.334Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/a5/842ae8f0c08b61d6484b52f99a03510a3a72d23141942d216ebe81fefbce/filelock-3.25.2-py3-none-any.whl", hash = "sha256:ca8afb0da15f229774c9ad1b455ed96e85a81373065fb10446672f64444ddf70", size = 26759, upload-time = "2026-03-11T20:45:37.437Z" },
]
[[package]]
@@ -478,11 +494,11 @@ wheels = [
[[package]]
name = "identify"
-version = "2.6.16"
+version = "2.6.18"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/5b/8d/e8b97e6bd3fb6fb271346f7981362f1e04d6a7463abd0de79e1fda17c067/identify-2.6.16.tar.gz", hash = "sha256:846857203b5511bbe94d5a352a48ef2359532bc8f6727b5544077a0dcfb24980", size = 99360, upload-time = "2026-01-12T18:58:58.201Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/46/c4/7fb4db12296cdb11893d61c92048fe617ee853f8523b9b296ac03b43757e/identify-2.6.18.tar.gz", hash = "sha256:873ac56a5e3fd63e7438a7ecbc4d91aca692eb3fefa4534db2b7913f3fc352fd", size = 99580, upload-time = "2026-03-15T18:39:50.319Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/b8/58/40fbbcefeda82364720eba5cf2270f98496bdfa19ea75b4cccae79c698e6/identify-2.6.16-py2.py3-none-any.whl", hash = "sha256:391ee4d77741d994189522896270b787aed8670389bfd60f326d677d64a6dfb0", size = 99202, upload-time = "2026-01-12T18:58:56.627Z" },
+ { url = "https://files.pythonhosted.org/packages/46/33/92ef41c6fad0233e41d3d84ba8e8ad18d1780f1e5d99b3c683e6d7f98b63/identify-2.6.18-py2.py3-none-any.whl", hash = "sha256:8db9d3c8ea9079db92cafb0ebf97abdc09d52e97f4dcf773a2e694048b7cd737", size = 99394, upload-time = "2026-03-15T18:39:48.915Z" },
]
[[package]]
@@ -586,6 +602,7 @@ requires-dist = [
{ name = "termcolor" },
{ name = "tqdm" },
- { name = "typing-extensions", specifier = ">=4.7,<5" },
+ { name = "typing-extensions", specifier = ">=4.14,<5" },
]
provides-extras = ["aiohttp"]
@@ -777,63 +794,63 @@ wheels = [
[[package]]
name = "numpy"
-version = "2.4.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/57/fd/0005efbd0af48e55eb3c7208af93f2862d4b1a56cd78e84309a2d959208d/numpy-2.4.2.tar.gz", hash = "sha256:659a6107e31a83c4e33f763942275fd278b21d095094044eb35569e86a21ddae", size = 20723651, upload-time = "2026-01-31T23:13:10.135Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/51/6e/6f394c9c77668153e14d4da83bcc247beb5952f6ead7699a1a2992613bea/numpy-2.4.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:21982668592194c609de53ba4933a7471880ccbaadcc52352694a59ecc860b3a", size = 16667963, upload-time = "2026-01-31T23:10:52.147Z" },
- { url = "https://files.pythonhosted.org/packages/1f/f8/55483431f2b2fd015ae6ed4fe62288823ce908437ed49db5a03d15151678/numpy-2.4.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40397bda92382fcec844066efb11f13e1c9a3e2a8e8f318fb72ed8b6db9f60f1", size = 14693571, upload-time = "2026-01-31T23:10:54.789Z" },
- { url = "https://files.pythonhosted.org/packages/2f/20/18026832b1845cdc82248208dd929ca14c9d8f2bac391f67440707fff27c/numpy-2.4.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:b3a24467af63c67829bfaa61eecf18d5432d4f11992688537be59ecd6ad32f5e", size = 5203469, upload-time = "2026-01-31T23:10:57.343Z" },
- { url = "https://files.pythonhosted.org/packages/7d/33/2eb97c8a77daaba34eaa3fa7241a14ac5f51c46a6bd5911361b644c4a1e2/numpy-2.4.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:805cc8de9fd6e7a22da5aed858e0ab16be5a4db6c873dde1d7451c541553aa27", size = 6550820, upload-time = "2026-01-31T23:10:59.429Z" },
- { url = "https://files.pythonhosted.org/packages/b1/91/b97fdfd12dc75b02c44e26c6638241cc004d4079a0321a69c62f51470c4c/numpy-2.4.2-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6d82351358ffbcdcd7b686b90742a9b86632d6c1c051016484fa0b326a0a1548", size = 15663067, upload-time = "2026-01-31T23:11:01.291Z" },
- { url = "https://files.pythonhosted.org/packages/f5/c6/a18e59f3f0b8071cc85cbc8d80cd02d68aa9710170b2553a117203d46936/numpy-2.4.2-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e35d3e0144137d9fdae62912e869136164534d64a169f86438bc9561b6ad49f", size = 16619782, upload-time = "2026-01-31T23:11:03.669Z" },
- { url = "https://files.pythonhosted.org/packages/b7/83/9751502164601a79e18847309f5ceec0b1446d7b6aa12305759b72cf98b2/numpy-2.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:adb6ed2ad29b9e15321d167d152ee909ec73395901b70936f029c3bc6d7f4460", size = 17013128, upload-time = "2026-01-31T23:11:05.913Z" },
- { url = "https://files.pythonhosted.org/packages/61/c4/c4066322256ec740acc1c8923a10047818691d2f8aec254798f3dd90f5f2/numpy-2.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8906e71fd8afcb76580404e2a950caef2685df3d2a57fe82a86ac8d33cc007ba", size = 18345324, upload-time = "2026-01-31T23:11:08.248Z" },
- { url = "https://files.pythonhosted.org/packages/ab/af/6157aa6da728fa4525a755bfad486ae7e3f76d4c1864138003eb84328497/numpy-2.4.2-cp312-cp312-win32.whl", hash = "sha256:ec055f6dae239a6299cace477b479cca2fc125c5675482daf1dd886933a1076f", size = 5960282, upload-time = "2026-01-31T23:11:10.497Z" },
- { url = "https://files.pythonhosted.org/packages/92/0f/7ceaaeaacb40567071e94dbf2c9480c0ae453d5bb4f52bea3892c39dc83c/numpy-2.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:209fae046e62d0ce6435fcfe3b1a10537e858249b3d9b05829e2a05218296a85", size = 12314210, upload-time = "2026-01-31T23:11:12.176Z" },
- { url = "https://files.pythonhosted.org/packages/2f/a3/56c5c604fae6dd40fa2ed3040d005fca97e91bd320d232ac9931d77ba13c/numpy-2.4.2-cp312-cp312-win_arm64.whl", hash = "sha256:fbde1b0c6e81d56f5dccd95dd4a711d9b95df1ae4009a60887e56b27e8d903fa", size = 10220171, upload-time = "2026-01-31T23:11:14.684Z" },
- { url = "https://files.pythonhosted.org/packages/a1/22/815b9fe25d1d7ae7d492152adbc7226d3eff731dffc38fe970589fcaaa38/numpy-2.4.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:25f2059807faea4b077a2b6837391b5d830864b3543627f381821c646f31a63c", size = 16663696, upload-time = "2026-01-31T23:11:17.516Z" },
- { url = "https://files.pythonhosted.org/packages/09/f0/817d03a03f93ba9c6c8993de509277d84e69f9453601915e4a69554102a1/numpy-2.4.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:bd3a7a9f5847d2fb8c2c6d1c862fa109c31a9abeca1a3c2bd5a64572955b2979", size = 14688322, upload-time = "2026-01-31T23:11:19.883Z" },
- { url = "https://files.pythonhosted.org/packages/da/b4/f805ab79293c728b9a99438775ce51885fd4f31b76178767cfc718701a39/numpy-2.4.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:8e4549f8a3c6d13d55041925e912bfd834285ef1dd64d6bc7d542583355e2e98", size = 5198157, upload-time = "2026-01-31T23:11:22.375Z" },
- { url = "https://files.pythonhosted.org/packages/74/09/826e4289844eccdcd64aac27d13b0fd3f32039915dd5b9ba01baae1f436c/numpy-2.4.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:aea4f66ff44dfddf8c2cffd66ba6538c5ec67d389285292fe428cb2c738c8aef", size = 6546330, upload-time = "2026-01-31T23:11:23.958Z" },
- { url = "https://files.pythonhosted.org/packages/19/fb/cbfdbfa3057a10aea5422c558ac57538e6acc87ec1669e666d32ac198da7/numpy-2.4.2-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c3cd545784805de05aafe1dde61752ea49a359ccba9760c1e5d1c88a93bbf2b7", size = 15660968, upload-time = "2026-01-31T23:11:25.713Z" },
- { url = "https://files.pythonhosted.org/packages/04/dc/46066ce18d01645541f0186877377b9371b8fa8017fa8262002b4ef22612/numpy-2.4.2-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d0d9b7c93578baafcbc5f0b83eaf17b79d345c6f36917ba0c67f45226911d499", size = 16607311, upload-time = "2026-01-31T23:11:28.117Z" },
- { url = "https://files.pythonhosted.org/packages/14/d9/4b5adfc39a43fa6bf918c6d544bc60c05236cc2f6339847fc5b35e6cb5b0/numpy-2.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f74f0f7779cc7ae07d1810aab8ac6b1464c3eafb9e283a40da7309d5e6e48fbb", size = 17012850, upload-time = "2026-01-31T23:11:30.888Z" },
- { url = "https://files.pythonhosted.org/packages/b7/20/adb6e6adde6d0130046e6fdfb7675cc62bc2f6b7b02239a09eb58435753d/numpy-2.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c7ac672d699bf36275c035e16b65539931347d68b70667d28984c9fb34e07fa7", size = 18334210, upload-time = "2026-01-31T23:11:33.214Z" },
- { url = "https://files.pythonhosted.org/packages/78/0e/0a73b3dff26803a8c02baa76398015ea2a5434d9b8265a7898a6028c1591/numpy-2.4.2-cp313-cp313-win32.whl", hash = "sha256:8e9afaeb0beff068b4d9cd20d322ba0ee1cecfb0b08db145e4ab4dd44a6b5110", size = 5958199, upload-time = "2026-01-31T23:11:35.385Z" },
- { url = "https://files.pythonhosted.org/packages/43/bc/6352f343522fcb2c04dbaf94cb30cca6fd32c1a750c06ad6231b4293708c/numpy-2.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:7df2de1e4fba69a51c06c28f5a3de36731eb9639feb8e1cf7e4a7b0daf4cf622", size = 12310848, upload-time = "2026-01-31T23:11:38.001Z" },
- { url = "https://files.pythonhosted.org/packages/6e/8d/6da186483e308da5da1cc6918ce913dcfe14ffde98e710bfeff2a6158d4e/numpy-2.4.2-cp313-cp313-win_arm64.whl", hash = "sha256:0fece1d1f0a89c16b03442eae5c56dc0be0c7883b5d388e0c03f53019a4bfd71", size = 10221082, upload-time = "2026-01-31T23:11:40.392Z" },
- { url = "https://files.pythonhosted.org/packages/25/a1/9510aa43555b44781968935c7548a8926274f815de42ad3997e9e83680dd/numpy-2.4.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5633c0da313330fd20c484c78cdd3f9b175b55e1a766c4a174230c6b70ad8262", size = 14815866, upload-time = "2026-01-31T23:11:42.495Z" },
- { url = "https://files.pythonhosted.org/packages/36/30/6bbb5e76631a5ae46e7923dd16ca9d3f1c93cfa8d4ed79a129814a9d8db3/numpy-2.4.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:d9f64d786b3b1dd742c946c42d15b07497ed14af1a1f3ce840cce27daa0ce913", size = 5325631, upload-time = "2026-01-31T23:11:44.7Z" },
- { url = "https://files.pythonhosted.org/packages/46/00/3a490938800c1923b567b3a15cd17896e68052e2145d8662aaf3e1ffc58f/numpy-2.4.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:b21041e8cb6a1eb5312dd1d2f80a94d91efffb7a06b70597d44f1bd2dfc315ab", size = 6646254, upload-time = "2026-01-31T23:11:46.341Z" },
- { url = "https://files.pythonhosted.org/packages/d3/e9/fac0890149898a9b609caa5af7455a948b544746e4b8fe7c212c8edd71f8/numpy-2.4.2-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:00ab83c56211a1d7c07c25e3217ea6695e50a3e2f255053686b081dc0b091a82", size = 15720138, upload-time = "2026-01-31T23:11:48.082Z" },
- { url = "https://files.pythonhosted.org/packages/ea/5c/08887c54e68e1e28df53709f1893ce92932cc6f01f7c3d4dc952f61ffd4e/numpy-2.4.2-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2fb882da679409066b4603579619341c6d6898fc83a8995199d5249f986e8e8f", size = 16655398, upload-time = "2026-01-31T23:11:50.293Z" },
- { url = "https://files.pythonhosted.org/packages/4d/89/253db0fa0e66e9129c745e4ef25631dc37d5f1314dad2b53e907b8538e6d/numpy-2.4.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:66cb9422236317f9d44b67b4d18f44efe6e9c7f8794ac0462978513359461554", size = 17079064, upload-time = "2026-01-31T23:11:52.927Z" },
- { url = "https://files.pythonhosted.org/packages/2a/d5/cbade46ce97c59c6c3da525e8d95b7abe8a42974a1dc5c1d489c10433e88/numpy-2.4.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:0f01dcf33e73d80bd8dc0f20a71303abbafa26a19e23f6b68d1aa9990af90257", size = 18379680, upload-time = "2026-01-31T23:11:55.22Z" },
- { url = "https://files.pythonhosted.org/packages/40/62/48f99ae172a4b63d981babe683685030e8a3df4f246c893ea5c6ef99f018/numpy-2.4.2-cp313-cp313t-win32.whl", hash = "sha256:52b913ec40ff7ae845687b0b34d8d93b60cb66dcee06996dd5c99f2fc9328657", size = 6082433, upload-time = "2026-01-31T23:11:58.096Z" },
- { url = "https://files.pythonhosted.org/packages/07/38/e054a61cfe48ad9f1ed0d188e78b7e26859d0b60ef21cd9de4897cdb5326/numpy-2.4.2-cp313-cp313t-win_amd64.whl", hash = "sha256:5eea80d908b2c1f91486eb95b3fb6fab187e569ec9752ab7d9333d2e66bf2d6b", size = 12451181, upload-time = "2026-01-31T23:11:59.782Z" },
- { url = "https://files.pythonhosted.org/packages/6e/a4/a05c3a6418575e185dd84d0b9680b6bb2e2dc3e4202f036b7b4e22d6e9dc/numpy-2.4.2-cp313-cp313t-win_arm64.whl", hash = "sha256:fd49860271d52127d61197bb50b64f58454e9f578cb4b2c001a6de8b1f50b0b1", size = 10290756, upload-time = "2026-01-31T23:12:02.438Z" },
- { url = "https://files.pythonhosted.org/packages/18/88/b7df6050bf18fdcfb7046286c6535cabbdd2064a3440fca3f069d319c16e/numpy-2.4.2-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:444be170853f1f9d528428eceb55f12918e4fda5d8805480f36a002f1415e09b", size = 16663092, upload-time = "2026-01-31T23:12:04.521Z" },
- { url = "https://files.pythonhosted.org/packages/25/7a/1fee4329abc705a469a4afe6e69b1ef7e915117747886327104a8493a955/numpy-2.4.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:d1240d50adff70c2a88217698ca844723068533f3f5c5fa6ee2e3220e3bdb000", size = 14698770, upload-time = "2026-01-31T23:12:06.96Z" },
- { url = "https://files.pythonhosted.org/packages/fb/0b/f9e49ba6c923678ad5bc38181c08ac5e53b7a5754dbca8e581aa1a56b1ff/numpy-2.4.2-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:7cdde6de52fb6664b00b056341265441192d1291c130e99183ec0d4b110ff8b1", size = 5208562, upload-time = "2026-01-31T23:12:09.632Z" },
- { url = "https://files.pythonhosted.org/packages/7d/12/d7de8f6f53f9bb76997e5e4c069eda2051e3fe134e9181671c4391677bb2/numpy-2.4.2-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:cda077c2e5b780200b6b3e09d0b42205a3d1c68f30c6dceb90401c13bff8fe74", size = 6543710, upload-time = "2026-01-31T23:12:11.969Z" },
- { url = "https://files.pythonhosted.org/packages/09/63/c66418c2e0268a31a4cf8a8b512685748200f8e8e8ec6c507ce14e773529/numpy-2.4.2-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d30291931c915b2ab5717c2974bb95ee891a1cf22ebc16a8006bd59cd210d40a", size = 15677205, upload-time = "2026-01-31T23:12:14.33Z" },
- { url = "https://files.pythonhosted.org/packages/5d/6c/7f237821c9642fb2a04d2f1e88b4295677144ca93285fd76eff3bcba858d/numpy-2.4.2-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bba37bc29d4d85761deed3954a1bc62be7cf462b9510b51d367b769a8c8df325", size = 16611738, upload-time = "2026-01-31T23:12:16.525Z" },
- { url = "https://files.pythonhosted.org/packages/c2/a7/39c4cdda9f019b609b5c473899d87abff092fc908cfe4d1ecb2fcff453b0/numpy-2.4.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b2f0073ed0868db1dcd86e052d37279eef185b9c8db5bf61f30f46adac63c909", size = 17028888, upload-time = "2026-01-31T23:12:19.306Z" },
- { url = "https://files.pythonhosted.org/packages/da/b3/e84bb64bdfea967cc10950d71090ec2d84b49bc691df0025dddb7c26e8e3/numpy-2.4.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:7f54844851cdb630ceb623dcec4db3240d1ac13d4990532446761baede94996a", size = 18339556, upload-time = "2026-01-31T23:12:21.816Z" },
- { url = "https://files.pythonhosted.org/packages/88/f5/954a291bc1192a27081706862ac62bb5920fbecfbaa302f64682aa90beed/numpy-2.4.2-cp314-cp314-win32.whl", hash = "sha256:12e26134a0331d8dbd9351620f037ec470b7c75929cb8a1537f6bfe411152a1a", size = 6006899, upload-time = "2026-01-31T23:12:24.14Z" },
- { url = "https://files.pythonhosted.org/packages/05/cb/eff72a91b2efdd1bc98b3b8759f6a1654aa87612fc86e3d87d6fe4f948c4/numpy-2.4.2-cp314-cp314-win_amd64.whl", hash = "sha256:068cdb2d0d644cdb45670810894f6a0600797a69c05f1ac478e8d31670b8ee75", size = 12443072, upload-time = "2026-01-31T23:12:26.33Z" },
- { url = "https://files.pythonhosted.org/packages/37/75/62726948db36a56428fce4ba80a115716dc4fad6a3a4352487f8bb950966/numpy-2.4.2-cp314-cp314-win_arm64.whl", hash = "sha256:6ed0be1ee58eef41231a5c943d7d1375f093142702d5723ca2eb07db9b934b05", size = 10494886, upload-time = "2026-01-31T23:12:28.488Z" },
- { url = "https://files.pythonhosted.org/packages/36/2f/ee93744f1e0661dc267e4b21940870cabfae187c092e1433b77b09b50ac4/numpy-2.4.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:98f16a80e917003a12c0580f97b5f875853ebc33e2eaa4bccfc8201ac6869308", size = 14818567, upload-time = "2026-01-31T23:12:30.709Z" },
- { url = "https://files.pythonhosted.org/packages/a7/24/6535212add7d76ff938d8bdc654f53f88d35cddedf807a599e180dcb8e66/numpy-2.4.2-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:20abd069b9cda45874498b245c8015b18ace6de8546bf50dfa8cea1696ed06ef", size = 5328372, upload-time = "2026-01-31T23:12:32.962Z" },
- { url = "https://files.pythonhosted.org/packages/5e/9d/c48f0a035725f925634bf6b8994253b43f2047f6778a54147d7e213bc5a7/numpy-2.4.2-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:e98c97502435b53741540a5717a6749ac2ada901056c7db951d33e11c885cc7d", size = 6649306, upload-time = "2026-01-31T23:12:34.797Z" },
- { url = "https://files.pythonhosted.org/packages/81/05/7c73a9574cd4a53a25907bad38b59ac83919c0ddc8234ec157f344d57d9a/numpy-2.4.2-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:da6cad4e82cb893db4b69105c604d805e0c3ce11501a55b5e9f9083b47d2ffe8", size = 15722394, upload-time = "2026-01-31T23:12:36.565Z" },
- { url = "https://files.pythonhosted.org/packages/35/fa/4de10089f21fc7d18442c4a767ab156b25c2a6eaf187c0db6d9ecdaeb43f/numpy-2.4.2-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e4424677ce4b47fe73c8b5556d876571f7c6945d264201180db2dc34f676ab5", size = 16653343, upload-time = "2026-01-31T23:12:39.188Z" },
- { url = "https://files.pythonhosted.org/packages/b8/f9/d33e4ffc857f3763a57aa85650f2e82486832d7492280ac21ba9efda80da/numpy-2.4.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:2b8f157c8a6f20eb657e240f8985cc135598b2b46985c5bccbde7616dc9c6b1e", size = 17078045, upload-time = "2026-01-31T23:12:42.041Z" },
- { url = "https://files.pythonhosted.org/packages/c8/b8/54bdb43b6225badbea6389fa038c4ef868c44f5890f95dd530a218706da3/numpy-2.4.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5daf6f3914a733336dab21a05cdec343144600e964d2fcdabaac0c0269874b2a", size = 18380024, upload-time = "2026-01-31T23:12:44.331Z" },
- { url = "https://files.pythonhosted.org/packages/a5/55/6e1a61ded7af8df04016d81b5b02daa59f2ea9252ee0397cb9f631efe9e5/numpy-2.4.2-cp314-cp314t-win32.whl", hash = "sha256:8c50dd1fc8826f5b26a5ee4d77ca55d88a895f4e4819c7ecc2a9f5905047a443", size = 6153937, upload-time = "2026-01-31T23:12:47.229Z" },
- { url = "https://files.pythonhosted.org/packages/45/aa/fa6118d1ed6d776b0983f3ceac9b1a5558e80df9365b1c3aa6d42bf9eee4/numpy-2.4.2-cp314-cp314t-win_amd64.whl", hash = "sha256:fcf92bee92742edd401ba41135185866f7026c502617f422eb432cfeca4fe236", size = 12631844, upload-time = "2026-01-31T23:12:48.997Z" },
- { url = "https://files.pythonhosted.org/packages/32/0a/2ec5deea6dcd158f254a7b372fb09cfba5719419c8d66343bab35237b3fb/numpy-2.4.2-cp314-cp314t-win_arm64.whl", hash = "sha256:1f92f53998a17265194018d1cc321b2e96e900ca52d54c7c77837b71b9465181", size = 10565379, upload-time = "2026-01-31T23:12:51.345Z" },
+version = "2.4.3"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/10/8b/c265f4823726ab832de836cdd184d0986dcf94480f81e8739692a7ac7af2/numpy-2.4.3.tar.gz", hash = "sha256:483a201202b73495f00dbc83796c6ae63137a9bdade074f7648b3e32613412dd", size = 20727743, upload-time = "2026-03-09T07:58:53.426Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a9/ed/6388632536f9788cea23a3a1b629f25b43eaacd7d7377e5d6bc7b9deb69b/numpy-2.4.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:61b0cbabbb6126c8df63b9a3a0c4b1f44ebca5e12ff6997b80fcf267fb3150ef", size = 16669628, upload-time = "2026-03-09T07:56:24.252Z" },
+ { url = "https://files.pythonhosted.org/packages/74/1b/ee2abfc68e1ce728b2958b6ba831d65c62e1b13ce3017c13943f8f9b5b2e/numpy-2.4.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7395e69ff32526710748f92cd8c9849b361830968ea3e24a676f272653e8983e", size = 14696872, upload-time = "2026-03-09T07:56:26.991Z" },
+ { url = "https://files.pythonhosted.org/packages/ba/d1/780400e915ff5638166f11ca9dc2c5815189f3d7cf6f8759a1685e586413/numpy-2.4.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:abdce0f71dcb4a00e4e77f3faf05e4616ceccfe72ccaa07f47ee79cda3b7b0f4", size = 5203489, upload-time = "2026-03-09T07:56:29.414Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/bb/baffa907e9da4cc34a6e556d6d90e032f6d7a75ea47968ea92b4858826c4/numpy-2.4.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:48da3a4ee1336454b07497ff7ec83903efa5505792c4e6d9bf83d99dc07a1e18", size = 6550814, upload-time = "2026-03-09T07:56:32.225Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/12/8c9f0c6c95f76aeb20fc4a699c33e9f827fa0d0f857747c73bb7b17af945/numpy-2.4.3-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:32e3bef222ad6b052280311d1d60db8e259e4947052c3ae7dd6817451fc8a4c5", size = 15666601, upload-time = "2026-03-09T07:56:34.461Z" },
+ { url = "https://files.pythonhosted.org/packages/bd/79/cc665495e4d57d0aa6fbcc0aa57aa82671dfc78fbf95fe733ed86d98f52a/numpy-2.4.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e7dd01a46700b1967487141a66ac1a3cf0dd8ebf1f08db37d46389401512ca97", size = 16621358, upload-time = "2026-03-09T07:56:36.852Z" },
+ { url = "https://files.pythonhosted.org/packages/a8/40/b4ecb7224af1065c3539f5ecfff879d090de09608ad1008f02c05c770cb3/numpy-2.4.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:76f0f283506c28b12bba319c0fab98217e9f9b54e6160e9c79e9f7348ba32e9c", size = 17016135, upload-time = "2026-03-09T07:56:39.337Z" },
+ { url = "https://files.pythonhosted.org/packages/f7/b1/6a88e888052eed951afed7a142dcdf3b149a030ca59b4c71eef085858e43/numpy-2.4.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:737f630a337364665aba3b5a77e56a68cc42d350edd010c345d65a3efa3addcc", size = 18345816, upload-time = "2026-03-09T07:56:42.31Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/8f/103a60c5f8c3d7fc678c19cd7b2476110da689ccb80bc18050efbaeae183/numpy-2.4.3-cp312-cp312-win32.whl", hash = "sha256:26952e18d82a1dbbc2f008d402021baa8d6fc8e84347a2072a25e08b46d698b9", size = 5960132, upload-time = "2026-03-09T07:56:44.851Z" },
+ { url = "https://files.pythonhosted.org/packages/d7/7c/f5ee1bf6ed888494978046a809df2882aad35d414b622893322df7286879/numpy-2.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:65f3c2455188f09678355f5cae1f959a06b778bc66d535da07bf2ef20cd319d5", size = 12316144, upload-time = "2026-03-09T07:56:47.057Z" },
+ { url = "https://files.pythonhosted.org/packages/71/46/8d1cb3f7a00f2fb6394140e7e6623696e54c6318a9d9691bb4904672cf42/numpy-2.4.3-cp312-cp312-win_arm64.whl", hash = "sha256:2abad5c7fef172b3377502bde47892439bae394a71bc329f31df0fd829b41a9e", size = 10220364, upload-time = "2026-03-09T07:56:49.849Z" },
+ { url = "https://files.pythonhosted.org/packages/b6/d0/1fe47a98ce0df229238b77611340aff92d52691bcbc10583303181abf7fc/numpy-2.4.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b346845443716c8e542d54112966383b448f4a3ba5c66409771b8c0889485dd3", size = 16665297, upload-time = "2026-03-09T07:56:52.296Z" },
+ { url = "https://files.pythonhosted.org/packages/27/d9/4e7c3f0e68dfa91f21c6fb6cf839bc829ec920688b1ce7ec722b1a6202fb/numpy-2.4.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2629289168f4897a3c4e23dc98d6f1731f0fc0fe52fb9db19f974041e4cc12b9", size = 14691853, upload-time = "2026-03-09T07:56:54.992Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/66/bd096b13a87549683812b53ab211e6d413497f84e794fb3c39191948da97/numpy-2.4.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:bb2e3cf95854233799013779216c57e153c1ee67a0bf92138acca0e429aefaee", size = 5198435, upload-time = "2026-03-09T07:56:57.184Z" },
+ { url = "https://files.pythonhosted.org/packages/a2/2f/687722910b5a5601de2135c891108f51dfc873d8e43c8ed9f4ebb440b4a2/numpy-2.4.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:7f3408ff897f8ab07a07fbe2823d7aee6ff644c097cc1f90382511fe982f647f", size = 6546347, upload-time = "2026-03-09T07:56:59.531Z" },
+ { url = "https://files.pythonhosted.org/packages/bf/ec/7971c4e98d86c564750393fab8d7d83d0a9432a9d78bb8a163a6dc59967a/numpy-2.4.3-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:decb0eb8a53c3b009b0962378065589685d66b23467ef5dac16cbe818afde27f", size = 15664626, upload-time = "2026-03-09T07:57:01.385Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/eb/7daecbea84ec935b7fc732e18f532073064a3816f0932a40a17f3349185f/numpy-2.4.3-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d5f51900414fc9204a0e0da158ba2ac52b75656e7dce7e77fb9f84bfa343b4cc", size = 16608916, upload-time = "2026-03-09T07:57:04.008Z" },
+ { url = "https://files.pythonhosted.org/packages/df/58/2a2b4a817ffd7472dca4421d9f0776898b364154e30c95f42195041dc03b/numpy-2.4.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6bd06731541f89cdc01b261ba2c9e037f1543df7472517836b78dfb15bd6e476", size = 17015824, upload-time = "2026-03-09T07:57:06.347Z" },
+ { url = "https://files.pythonhosted.org/packages/4a/ca/627a828d44e78a418c55f82dd4caea8ea4a8ef24e5144d9e71016e52fb40/numpy-2.4.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:22654fe6be0e5206f553a9250762c653d3698e46686eee53b399ab90da59bd92", size = 18334581, upload-time = "2026-03-09T07:57:09.114Z" },
+ { url = "https://files.pythonhosted.org/packages/cd/c0/76f93962fc79955fcba30a429b62304332345f22d4daec1cb33653425643/numpy-2.4.3-cp313-cp313-win32.whl", hash = "sha256:d71e379452a2f670ccb689ec801b1218cd3983e253105d6e83780967e899d687", size = 5958618, upload-time = "2026-03-09T07:57:11.432Z" },
+ { url = "https://files.pythonhosted.org/packages/b1/3c/88af0040119209b9b5cb59485fa48b76f372c73068dbf9254784b975ac53/numpy-2.4.3-cp313-cp313-win_amd64.whl", hash = "sha256:0a60e17a14d640f49146cb38e3f105f571318db7826d9b6fef7e4dce758faecd", size = 12312824, upload-time = "2026-03-09T07:57:13.586Z" },
+ { url = "https://files.pythonhosted.org/packages/58/ce/3d07743aced3d173f877c3ef6a454c2174ba42b584ab0b7e6d99374f51ed/numpy-2.4.3-cp313-cp313-win_arm64.whl", hash = "sha256:c9619741e9da2059cd9c3f206110b97583c7152c1dc9f8aafd4beb450ac1c89d", size = 10221218, upload-time = "2026-03-09T07:57:16.183Z" },
+ { url = "https://files.pythonhosted.org/packages/62/09/d96b02a91d09e9d97862f4fc8bfebf5400f567d8eb1fe4b0cc4795679c15/numpy-2.4.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7aa4e54f6469300ebca1d9eb80acd5253cdfa36f2c03d79a35883687da430875", size = 14819570, upload-time = "2026-03-09T07:57:18.564Z" },
+ { url = "https://files.pythonhosted.org/packages/b5/ca/0b1aba3905fdfa3373d523b2b15b19029f4f3031c87f4066bd9d20ef6c6b/numpy-2.4.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:d1b90d840b25874cf5cd20c219af10bac3667db3876d9a495609273ebe679070", size = 5326113, upload-time = "2026-03-09T07:57:21.052Z" },
+ { url = "https://files.pythonhosted.org/packages/c0/63/406e0fd32fcaeb94180fd6a4c41e55736d676c54346b7efbce548b94a914/numpy-2.4.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:a749547700de0a20a6718293396ec237bb38218049cfce788e08fcb716e8cf73", size = 6646370, upload-time = "2026-03-09T07:57:22.804Z" },
+ { url = "https://files.pythonhosted.org/packages/b6/d0/10f7dc157d4b37af92720a196be6f54f889e90dcd30dce9dc657ed92c257/numpy-2.4.3-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:94f3c4a151a2e529adf49c1d54f0f57ff8f9b233ee4d44af623a81553ab86368", size = 15723499, upload-time = "2026-03-09T07:57:24.693Z" },
+ { url = "https://files.pythonhosted.org/packages/66/f1/d1c2bf1161396629701bc284d958dc1efa3a5a542aab83cf11ee6eb4cba5/numpy-2.4.3-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22c31dc07025123aedf7f2db9e91783df13f1776dc52c6b22c620870dc0fab22", size = 16657164, upload-time = "2026-03-09T07:57:27.676Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/be/cca19230b740af199ac47331a21c71e7a3d0ba59661350483c1600d28c37/numpy-2.4.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:148d59127ac95979d6f07e4d460f934ebdd6eed641db9c0db6c73026f2b2101a", size = 17081544, upload-time = "2026-03-09T07:57:30.664Z" },
+ { url = "https://files.pythonhosted.org/packages/b9/c5/9602b0cbb703a0936fb40f8a95407e8171935b15846de2f0776e08af04c7/numpy-2.4.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:a97cbf7e905c435865c2d939af3d93f99d18eaaa3cabe4256f4304fb51604349", size = 18380290, upload-time = "2026-03-09T07:57:33.763Z" },
+ { url = "https://files.pythonhosted.org/packages/ed/81/9f24708953cd30be9ee36ec4778f4b112b45165812f2ada4cc5ea1c1f254/numpy-2.4.3-cp313-cp313t-win32.whl", hash = "sha256:be3b8487d725a77acccc9924f65fd8bce9af7fac8c9820df1049424a2115af6c", size = 6082814, upload-time = "2026-03-09T07:57:36.491Z" },
+ { url = "https://files.pythonhosted.org/packages/e2/9e/52f6eaa13e1a799f0ab79066c17f7016a4a8ae0c1aefa58c82b4dab690b4/numpy-2.4.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1ec84fd7c8e652b0f4aaaf2e6e9cc8eaa9b1b80a537e06b2e3a2fb176eedcb26", size = 12452673, upload-time = "2026-03-09T07:57:38.281Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/04/b8cece6ead0b30c9fbd99bb835ad7ea0112ac5f39f069788c5558e3b1ab2/numpy-2.4.3-cp313-cp313t-win_arm64.whl", hash = "sha256:120df8c0a81ebbf5b9020c91439fccd85f5e018a927a39f624845be194a2be02", size = 10290907, upload-time = "2026-03-09T07:57:40.747Z" },
+ { url = "https://files.pythonhosted.org/packages/70/ae/3936f79adebf8caf81bd7a599b90a561334a658be4dcc7b6329ebf4ee8de/numpy-2.4.3-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:5884ce5c7acfae1e4e1b6fde43797d10aa506074d25b531b4f54bde33c0c31d4", size = 16664563, upload-time = "2026-03-09T07:57:43.817Z" },
+ { url = "https://files.pythonhosted.org/packages/9b/62/760f2b55866b496bb1fa7da2a6db076bef908110e568b02fcfc1422e2a3a/numpy-2.4.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:297837823f5bc572c5f9379b0c9f3a3365f08492cbdc33bcc3af174372ebb168", size = 14702161, upload-time = "2026-03-09T07:57:46.169Z" },
+ { url = "https://files.pythonhosted.org/packages/32/af/a7a39464e2c0a21526fb4fb76e346fb172ebc92f6d1c7a07c2c139cc17b1/numpy-2.4.3-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:a111698b4a3f8dcbe54c64a7708f049355abd603e619013c346553c1fd4ca90b", size = 5208738, upload-time = "2026-03-09T07:57:48.506Z" },
+ { url = "https://files.pythonhosted.org/packages/29/8c/2a0cf86a59558fa078d83805589c2de490f29ed4fb336c14313a161d358a/numpy-2.4.3-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:4bd4741a6a676770e0e97fe9ab2e51de01183df3dcbcec591d26d331a40de950", size = 6543618, upload-time = "2026-03-09T07:57:50.591Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/b8/612ce010c0728b1c363fa4ea3aa4c22fe1c5da1de008486f8c2f5cb92fae/numpy-2.4.3-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:54f29b877279d51e210e0c80709ee14ccbbad647810e8f3d375561c45ef613dd", size = 15680676, upload-time = "2026-03-09T07:57:52.34Z" },
+ { url = "https://files.pythonhosted.org/packages/a9/7e/4f120ecc54ba26ddf3dc348eeb9eb063f421de65c05fc961941798feea18/numpy-2.4.3-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:679f2a834bae9020f81534671c56fd0cc76dd7e5182f57131478e23d0dc59e24", size = 16613492, upload-time = "2026-03-09T07:57:54.91Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/86/1b6020db73be330c4b45d5c6ee4295d59cfeef0e3ea323959d053e5a6909/numpy-2.4.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:d84f0f881cb2225c2dfd7f78a10a5645d487a496c6668d6cc39f0f114164f3d0", size = 17031789, upload-time = "2026-03-09T07:57:57.641Z" },
+ { url = "https://files.pythonhosted.org/packages/07/3a/3b90463bf41ebc21d1b7e06079f03070334374208c0f9a1f05e4ae8455e7/numpy-2.4.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d213c7e6e8d211888cc359bab7199670a00f5b82c0978b9d1c75baf1eddbeac0", size = 18339941, upload-time = "2026-03-09T07:58:00.577Z" },
+ { url = "https://files.pythonhosted.org/packages/a8/74/6d736c4cd962259fd8bae9be27363eb4883a2f9069763747347544c2a487/numpy-2.4.3-cp314-cp314-win32.whl", hash = "sha256:52077feedeff7c76ed7c9f1a0428558e50825347b7545bbb8523da2cd55c547a", size = 6007503, upload-time = "2026-03-09T07:58:03.331Z" },
+ { url = "https://files.pythonhosted.org/packages/48/39/c56ef87af669364356bb011922ef0734fc49dad51964568634c72a009488/numpy-2.4.3-cp314-cp314-win_amd64.whl", hash = "sha256:0448e7f9caefb34b4b7dd2b77f21e8906e5d6f0365ad525f9f4f530b13df2afc", size = 12444915, upload-time = "2026-03-09T07:58:06.353Z" },
+ { url = "https://files.pythonhosted.org/packages/9d/1f/ab8528e38d295fd349310807496fabb7cf9fe2e1f70b97bc20a483ea9d4a/numpy-2.4.3-cp314-cp314-win_arm64.whl", hash = "sha256:b44fd60341c4d9783039598efadd03617fa28d041fc37d22b62d08f2027fa0e7", size = 10494875, upload-time = "2026-03-09T07:58:08.734Z" },
+ { url = "https://files.pythonhosted.org/packages/e6/ef/b7c35e4d5ef141b836658ab21a66d1a573e15b335b1d111d31f26c8ef80f/numpy-2.4.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0a195f4216be9305a73c0e91c9b026a35f2161237cf1c6de9b681637772ea657", size = 14822225, upload-time = "2026-03-09T07:58:11.034Z" },
+ { url = "https://files.pythonhosted.org/packages/cd/8d/7730fa9278cf6648639946cc816e7cc89f0d891602584697923375f801ed/numpy-2.4.3-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:cd32fbacb9fd1bf041bf8e89e4576b6f00b895f06d00914820ae06a616bdfef7", size = 5328769, upload-time = "2026-03-09T07:58:13.67Z" },
+ { url = "https://files.pythonhosted.org/packages/47/01/d2a137317c958b074d338807c1b6a383406cdf8b8e53b075d804cc3d211d/numpy-2.4.3-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:2e03c05abaee1f672e9d67bc858f300b5ccba1c21397211e8d77d98350972093", size = 6649461, upload-time = "2026-03-09T07:58:15.912Z" },
+ { url = "https://files.pythonhosted.org/packages/5c/34/812ce12bc0f00272a4b0ec0d713cd237cb390666eb6206323d1cc9cedbb2/numpy-2.4.3-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7d1ce23cce91fcea443320a9d0ece9b9305d4368875bab09538f7a5b4131938a", size = 15725809, upload-time = "2026-03-09T07:58:17.787Z" },
+ { url = "https://files.pythonhosted.org/packages/25/c0/2aed473a4823e905e765fee3dc2cbf504bd3e68ccb1150fbdabd5c39f527/numpy-2.4.3-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c59020932feb24ed49ffd03704fbab89f22aa9c0d4b180ff45542fe8918f5611", size = 16655242, upload-time = "2026-03-09T07:58:20.476Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/c8/7e052b2fc87aa0e86de23f20e2c42bd261c624748aa8efd2c78f7bb8d8c6/numpy-2.4.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:9684823a78a6cd6ad7511fc5e25b07947d1d5b5e2812c93fe99d7d4195130720", size = 17080660, upload-time = "2026-03-09T07:58:23.067Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/3d/0876746044db2adcb11549f214d104f2e1be00f07a67edbb4e2812094847/numpy-2.4.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0200b25c687033316fb39f0ff4e3e690e8957a2c3c8d22499891ec58c37a3eb5", size = 18380384, upload-time = "2026-03-09T07:58:25.839Z" },
+ { url = "https://files.pythonhosted.org/packages/07/12/8160bea39da3335737b10308df4f484235fd297f556745f13092aa039d3b/numpy-2.4.3-cp314-cp314t-win32.whl", hash = "sha256:5e10da9e93247e554bb1d22f8edc51847ddd7dde52d85ce31024c1b4312bfba0", size = 6154547, upload-time = "2026-03-09T07:58:28.289Z" },
+ { url = "https://files.pythonhosted.org/packages/42/f3/76534f61f80d74cc9cdf2e570d3d4eeb92c2280a27c39b0aaf471eda7b48/numpy-2.4.3-cp314-cp314t-win_amd64.whl", hash = "sha256:45f003dbdffb997a03da2d1d0cb41fbd24a87507fb41605c0420a3db5bd4667b", size = 12633645, upload-time = "2026-03-09T07:58:30.384Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/b6/7c0d4334c15983cec7f92a69e8ce9b1e6f31857e5ee3a413ac424e6bd63d/numpy-2.4.3-cp314-cp314t-win_arm64.whl", hash = "sha256:4d382735cecd7bcf090172489a525cd7d4087bc331f7df9f60ddc9a296cf208e", size = 10565454, upload-time = "2026-03-09T07:58:33.031Z" },
]
[[package]]
@@ -847,54 +864,54 @@ wheels = [
[[package]]
name = "pandas"
-version = "3.0.0"
+version = "3.0.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "numpy" },
{ name = "python-dateutil" },
{ name = "tzdata", marker = "sys_platform == 'emscripten' or sys_platform == 'win32' or (extra == 'group-18-llama-stack-client-pydantic-v1' and extra == 'group-18-llama-stack-client-pydantic-v2')" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/de/da/b1dc0481ab8d55d0f46e343cfe67d4551a0e14fcee52bd38ca1bd73258d8/pandas-3.0.0.tar.gz", hash = "sha256:0facf7e87d38f721f0af46fe70d97373a37701b1c09f7ed7aeeb292ade5c050f", size = 4633005, upload-time = "2026-01-21T15:52:04.726Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/0b/38/db33686f4b5fa64d7af40d96361f6a4615b8c6c8f1b3d334eee46ae6160e/pandas-3.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:9803b31f5039b3c3b10cc858c5e40054adb4b29b4d81cb2fd789f4121c8efbcd", size = 10334013, upload-time = "2026-01-21T15:50:34.771Z" },
- { url = "https://files.pythonhosted.org/packages/a5/7b/9254310594e9774906bacdd4e732415e1f86ab7dbb4b377ef9ede58cd8ec/pandas-3.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:14c2a4099cd38a1d18ff108168ea417909b2dea3bd1ebff2ccf28ddb6a74d740", size = 9874154, upload-time = "2026-01-21T15:50:36.67Z" },
- { url = "https://files.pythonhosted.org/packages/63/d4/726c5a67a13bc66643e66d2e9ff115cead482a44fc56991d0c4014f15aaf/pandas-3.0.0-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d257699b9a9960e6125686098d5714ac59d05222bef7a5e6af7a7fd87c650801", size = 10384433, upload-time = "2026-01-21T15:50:39.132Z" },
- { url = "https://files.pythonhosted.org/packages/bf/2e/9211f09bedb04f9832122942de8b051804b31a39cfbad199a819bb88d9f3/pandas-3.0.0-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:69780c98f286076dcafca38d8b8eee1676adf220199c0a39f0ecbf976b68151a", size = 10864519, upload-time = "2026-01-21T15:50:41.043Z" },
- { url = "https://files.pythonhosted.org/packages/00/8d/50858522cdc46ac88b9afdc3015e298959a70a08cd21e008a44e9520180c/pandas-3.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4a66384f017240f3858a4c8a7cf21b0591c3ac885cddb7758a589f0f71e87ebb", size = 11394124, upload-time = "2026-01-21T15:50:43.377Z" },
- { url = "https://files.pythonhosted.org/packages/86/3f/83b2577db02503cd93d8e95b0f794ad9d4be0ba7cb6c8bcdcac964a34a42/pandas-3.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:be8c515c9bc33989d97b89db66ea0cececb0f6e3c2a87fcc8b69443a6923e95f", size = 11920444, upload-time = "2026-01-21T15:50:45.932Z" },
- { url = "https://files.pythonhosted.org/packages/64/2d/4f8a2f192ed12c90a0aab47f5557ece0e56b0370c49de9454a09de7381b2/pandas-3.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:a453aad8c4f4e9f166436994a33884442ea62aa8b27d007311e87521b97246e1", size = 9730970, upload-time = "2026-01-21T15:50:47.962Z" },
- { url = "https://files.pythonhosted.org/packages/d4/64/ff571be435cf1e643ca98d0945d76732c0b4e9c37191a89c8550b105eed1/pandas-3.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:da768007b5a33057f6d9053563d6b74dd6d029c337d93c6d0d22a763a5c2ecc0", size = 9041950, upload-time = "2026-01-21T15:50:50.422Z" },
- { url = "https://files.pythonhosted.org/packages/6f/fa/7f0ac4ca8877c57537aaff2a842f8760e630d8e824b730eb2e859ffe96ca/pandas-3.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b78d646249b9a2bc191040988c7bb524c92fa8534fb0898a0741d7e6f2ffafa6", size = 10307129, upload-time = "2026-01-21T15:50:52.877Z" },
- { url = "https://files.pythonhosted.org/packages/6f/11/28a221815dcea4c0c9414dfc845e34a84a6a7dabc6da3194498ed5ba4361/pandas-3.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:bc9cba7b355cb4162442a88ce495e01cb605f17ac1e27d6596ac963504e0305f", size = 9850201, upload-time = "2026-01-21T15:50:54.807Z" },
- { url = "https://files.pythonhosted.org/packages/ba/da/53bbc8c5363b7e5bd10f9ae59ab250fc7a382ea6ba08e4d06d8694370354/pandas-3.0.0-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3c9a1a149aed3b6c9bf246033ff91e1b02d529546c5d6fb6b74a28fea0cf4c70", size = 10354031, upload-time = "2026-01-21T15:50:57.463Z" },
- { url = "https://files.pythonhosted.org/packages/f7/a3/51e02ebc2a14974170d51e2410dfdab58870ea9bcd37cda15bd553d24dc4/pandas-3.0.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:95683af6175d884ee89471842acfca29172a85031fccdabc35e50c0984470a0e", size = 10861165, upload-time = "2026-01-21T15:50:59.32Z" },
- { url = "https://files.pythonhosted.org/packages/a5/fe/05a51e3cac11d161472b8297bd41723ea98013384dd6d76d115ce3482f9b/pandas-3.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1fbbb5a7288719e36b76b4f18d46ede46e7f916b6c8d9915b756b0a6c3f792b3", size = 11359359, upload-time = "2026-01-21T15:51:02.014Z" },
- { url = "https://files.pythonhosted.org/packages/ee/56/ba620583225f9b85a4d3e69c01df3e3870659cc525f67929b60e9f21dcd1/pandas-3.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8e8b9808590fa364416b49b2a35c1f4cf2785a6c156935879e57f826df22038e", size = 11912907, upload-time = "2026-01-21T15:51:05.175Z" },
- { url = "https://files.pythonhosted.org/packages/c9/8c/c6638d9f67e45e07656b3826405c5cc5f57f6fd07c8b2572ade328c86e22/pandas-3.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:98212a38a709feb90ae658cb6227ea3657c22ba8157d4b8f913cd4c950de5e7e", size = 9732138, upload-time = "2026-01-21T15:51:07.569Z" },
- { url = "https://files.pythonhosted.org/packages/7b/bf/bd1335c3bf1770b6d8fed2799993b11c4971af93bb1b729b9ebbc02ca2ec/pandas-3.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:177d9df10b3f43b70307a149d7ec49a1229a653f907aa60a48f1877d0e6be3be", size = 9033568, upload-time = "2026-01-21T15:51:09.484Z" },
- { url = "https://files.pythonhosted.org/packages/8e/c6/f5e2171914d5e29b9171d495344097d54e3ffe41d2d85d8115baba4dc483/pandas-3.0.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:2713810ad3806767b89ad3b7b69ba153e1c6ff6d9c20f9c2140379b2a98b6c98", size = 10741936, upload-time = "2026-01-21T15:51:11.693Z" },
- { url = "https://files.pythonhosted.org/packages/51/88/9a0164f99510a1acb9f548691f022c756c2314aad0d8330a24616c14c462/pandas-3.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:15d59f885ee5011daf8335dff47dcb8a912a27b4ad7826dc6cbe809fd145d327", size = 10393884, upload-time = "2026-01-21T15:51:14.197Z" },
- { url = "https://files.pythonhosted.org/packages/e0/53/b34d78084d88d8ae2b848591229da8826d1e65aacf00b3abe34023467648/pandas-3.0.0-cp313-cp313t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:24e6547fb64d2c92665dd2adbfa4e85fa4fd70a9c070e7cfb03b629a0bbab5eb", size = 10310740, upload-time = "2026-01-21T15:51:16.093Z" },
- { url = "https://files.pythonhosted.org/packages/5b/d3/bee792e7c3d6930b74468d990604325701412e55d7aaf47460a22311d1a5/pandas-3.0.0-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:48ee04b90e2505c693d3f8e8f524dab8cb8aaf7ddcab52c92afa535e717c4812", size = 10700014, upload-time = "2026-01-21T15:51:18.818Z" },
- { url = "https://files.pythonhosted.org/packages/55/db/2570bc40fb13aaed1cbc3fbd725c3a60ee162477982123c3adc8971e7ac1/pandas-3.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:66f72fb172959af42a459e27a8d8d2c7e311ff4c1f7db6deb3b643dbc382ae08", size = 11323737, upload-time = "2026-01-21T15:51:20.784Z" },
- { url = "https://files.pythonhosted.org/packages/bc/2e/297ac7f21c8181b62a4cccebad0a70caf679adf3ae5e83cb676194c8acc3/pandas-3.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4a4a400ca18230976724a5066f20878af785f36c6756e498e94c2a5e5d57779c", size = 11771558, upload-time = "2026-01-21T15:51:22.977Z" },
- { url = "https://files.pythonhosted.org/packages/0a/46/e1c6876d71c14332be70239acce9ad435975a80541086e5ffba2f249bcf6/pandas-3.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:940eebffe55528074341a5a36515f3e4c5e25e958ebbc764c9502cfc35ba3faa", size = 10473771, upload-time = "2026-01-21T15:51:25.285Z" },
- { url = "https://files.pythonhosted.org/packages/c0/db/0270ad9d13c344b7a36fa77f5f8344a46501abf413803e885d22864d10bf/pandas-3.0.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:597c08fb9fef0edf1e4fa2f9828dd27f3d78f9b8c9b4a748d435ffc55732310b", size = 10312075, upload-time = "2026-01-21T15:51:28.5Z" },
- { url = "https://files.pythonhosted.org/packages/09/9f/c176f5e9717f7c91becfe0f55a52ae445d3f7326b4a2cf355978c51b7913/pandas-3.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:447b2d68ac5edcbf94655fe909113a6dba6ef09ad7f9f60c80477825b6c489fe", size = 9900213, upload-time = "2026-01-21T15:51:30.955Z" },
- { url = "https://files.pythonhosted.org/packages/d9/e7/63ad4cc10b257b143e0a5ebb04304ad806b4e1a61c5da25f55896d2ca0f4/pandas-3.0.0-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:debb95c77ff3ed3ba0d9aa20c3a2f19165cc7956362f9873fce1ba0a53819d70", size = 10428768, upload-time = "2026-01-21T15:51:33.018Z" },
- { url = "https://files.pythonhosted.org/packages/9e/0e/4e4c2d8210f20149fd2248ef3fff26623604922bd564d915f935a06dd63d/pandas-3.0.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fedabf175e7cd82b69b74c30adbaa616de301291a5231138d7242596fc296a8d", size = 10882954, upload-time = "2026-01-21T15:51:35.287Z" },
- { url = "https://files.pythonhosted.org/packages/c6/60/c9de8ac906ba1f4d2250f8a951abe5135b404227a55858a75ad26f84db47/pandas-3.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:412d1a89aab46889f3033a386912efcdfa0f1131c5705ff5b668dda88305e986", size = 11430293, upload-time = "2026-01-21T15:51:37.57Z" },
- { url = "https://files.pythonhosted.org/packages/a1/69/806e6637c70920e5787a6d6896fd707f8134c2c55cd761e7249a97b7dc5a/pandas-3.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:e979d22316f9350c516479dd3a92252be2937a9531ed3a26ec324198a99cdd49", size = 11952452, upload-time = "2026-01-21T15:51:39.618Z" },
- { url = "https://files.pythonhosted.org/packages/cb/de/918621e46af55164c400ab0ef389c9d969ab85a43d59ad1207d4ddbe30a5/pandas-3.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:083b11415b9970b6e7888800c43c82e81a06cd6b06755d84804444f0007d6bb7", size = 9851081, upload-time = "2026-01-21T15:51:41.758Z" },
- { url = "https://files.pythonhosted.org/packages/91/a1/3562a18dd0bd8c73344bfa26ff90c53c72f827df119d6d6b1dacc84d13e3/pandas-3.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:5db1e62cb99e739fa78a28047e861b256d17f88463c76b8dafc7c1338086dca8", size = 9174610, upload-time = "2026-01-21T15:51:44.312Z" },
- { url = "https://files.pythonhosted.org/packages/ce/26/430d91257eaf366f1737d7a1c158677caaf6267f338ec74e3a1ec444111c/pandas-3.0.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:697b8f7d346c68274b1b93a170a70974cdc7d7354429894d5927c1effdcccd73", size = 10761999, upload-time = "2026-01-21T15:51:46.899Z" },
- { url = "https://files.pythonhosted.org/packages/ec/1a/954eb47736c2b7f7fe6a9d56b0cb6987773c00faa3c6451a43db4beb3254/pandas-3.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:8cb3120f0d9467ed95e77f67a75e030b67545bcfa08964e349252d674171def2", size = 10410279, upload-time = "2026-01-21T15:51:48.89Z" },
- { url = "https://files.pythonhosted.org/packages/20/fc/b96f3a5a28b250cd1b366eb0108df2501c0f38314a00847242abab71bb3a/pandas-3.0.0-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:33fd3e6baa72899746b820c31e4b9688c8e1b7864d7aec2de7ab5035c285277a", size = 10330198, upload-time = "2026-01-21T15:51:51.015Z" },
- { url = "https://files.pythonhosted.org/packages/90/b3/d0e2952f103b4fbef1ef22d0c2e314e74fc9064b51cee30890b5e3286ee6/pandas-3.0.0-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8942e333dc67ceda1095227ad0febb05a3b36535e520154085db632c40ad084", size = 10728513, upload-time = "2026-01-21T15:51:53.387Z" },
- { url = "https://files.pythonhosted.org/packages/76/81/832894f286df828993dc5fd61c63b231b0fb73377e99f6c6c369174cf97e/pandas-3.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:783ac35c4d0fe0effdb0d67161859078618b1b6587a1af15928137525217a721", size = 11345550, upload-time = "2026-01-21T15:51:55.329Z" },
- { url = "https://files.pythonhosted.org/packages/34/a0/ed160a00fb4f37d806406bc0a79a8b62fe67f29d00950f8d16203ff3409b/pandas-3.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:125eb901e233f155b268bbef9abd9afb5819db74f0e677e89a61b246228c71ac", size = 11799386, upload-time = "2026-01-21T15:51:57.457Z" },
- { url = "https://files.pythonhosted.org/packages/36/c8/2ac00d7255252c5e3cf61b35ca92ca25704b0188f7454ca4aec08a33cece/pandas-3.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:b86d113b6c109df3ce0ad5abbc259fe86a1bd4adfd4a31a89da42f84f65509bb", size = 10873041, upload-time = "2026-01-21T15:52:00.034Z" },
- { url = "https://files.pythonhosted.org/packages/e6/3f/a80ac00acbc6b35166b42850e98a4f466e2c0d9c64054161ba9620f95680/pandas-3.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:1c39eab3ad38f2d7a249095f0a3d8f8c22cc0f847e98ccf5bbe732b272e2d9fa", size = 9441003, upload-time = "2026-01-21T15:52:02.281Z" },
+sdist = { url = "https://files.pythonhosted.org/packages/2e/0c/b28ed414f080ee0ad153f848586d61d1878f91689950f037f976ce15f6c8/pandas-3.0.1.tar.gz", hash = "sha256:4186a699674af418f655dbd420ed87f50d56b4cd6603784279d9eef6627823c8", size = 4641901, upload-time = "2026-02-17T22:20:16.434Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/37/51/b467209c08dae2c624873d7491ea47d2b47336e5403309d433ea79c38571/pandas-3.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:476f84f8c20c9f5bc47252b66b4bb25e1a9fc2fa98cead96744d8116cb85771d", size = 10344357, upload-time = "2026-02-17T22:18:38.262Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/f1/e2567ffc8951ab371db2e40b2fe068e36b81d8cf3260f06ae508700e5504/pandas-3.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0ab749dfba921edf641d4036c4c21c0b3ea70fea478165cb98a998fb2a261955", size = 9884543, upload-time = "2026-02-17T22:18:41.476Z" },
+ { url = "https://files.pythonhosted.org/packages/d7/39/327802e0b6d693182403c144edacbc27eb82907b57062f23ef5a4c4a5ea7/pandas-3.0.1-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b8e36891080b87823aff3640c78649b91b8ff6eea3c0d70aeabd72ea43ab069b", size = 10396030, upload-time = "2026-02-17T22:18:43.822Z" },
+ { url = "https://files.pythonhosted.org/packages/3d/fe/89d77e424365280b79d99b3e1e7d606f5165af2f2ecfaf0c6d24c799d607/pandas-3.0.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:532527a701281b9dd371e2f582ed9094f4c12dd9ffb82c0c54ee28d8ac9520c4", size = 10876435, upload-time = "2026-02-17T22:18:45.954Z" },
+ { url = "https://files.pythonhosted.org/packages/b5/a6/2a75320849dd154a793f69c951db759aedb8d1dd3939eeacda9bdcfa1629/pandas-3.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:356e5c055ed9b0da1580d465657bc7d00635af4fd47f30afb23025352ba764d1", size = 11405133, upload-time = "2026-02-17T22:18:48.533Z" },
+ { url = "https://files.pythonhosted.org/packages/58/53/1d68fafb2e02d7881df66aa53be4cd748d25cbe311f3b3c85c93ea5d30ca/pandas-3.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9d810036895f9ad6345b8f2a338dd6998a74e8483847403582cab67745bff821", size = 11932065, upload-time = "2026-02-17T22:18:50.837Z" },
+ { url = "https://files.pythonhosted.org/packages/75/08/67cc404b3a966b6df27b38370ddd96b3b023030b572283d035181854aac5/pandas-3.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:536232a5fe26dd989bd633e7a0c450705fdc86a207fec7254a55e9a22950fe43", size = 9741627, upload-time = "2026-02-17T22:18:53.905Z" },
+ { url = "https://files.pythonhosted.org/packages/86/4f/caf9952948fb00d23795f09b893d11f1cacb384e666854d87249530f7cbe/pandas-3.0.1-cp312-cp312-win_arm64.whl", hash = "sha256:0f463ebfd8de7f326d38037c7363c6dacb857c5881ab8961fb387804d6daf2f7", size = 9052483, upload-time = "2026-02-17T22:18:57.31Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/48/aad6ec4f8d007534c091e9a7172b3ec1b1ee6d99a9cbb936b5eab6c6cf58/pandas-3.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5272627187b5d9c20e55d27caf5f2cd23e286aba25cadf73c8590e432e2b7262", size = 10317509, upload-time = "2026-02-17T22:18:59.498Z" },
+ { url = "https://files.pythonhosted.org/packages/a8/14/5990826f779f79148ae9d3a2c39593dc04d61d5d90541e71b5749f35af95/pandas-3.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:661e0f665932af88c7877f31da0dc743fe9c8f2524bdffe23d24fdcb67ef9d56", size = 9860561, upload-time = "2026-02-17T22:19:02.265Z" },
+ { url = "https://files.pythonhosted.org/packages/fa/80/f01ff54664b6d70fed71475543d108a9b7c888e923ad210795bef04ffb7d/pandas-3.0.1-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:75e6e292ff898679e47a2199172593d9f6107fd2dd3617c22c2946e97d5df46e", size = 10365506, upload-time = "2026-02-17T22:19:05.017Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/85/ab6d04733a7d6ff32bfc8382bf1b07078228f5d6ebec5266b91bfc5c4ff7/pandas-3.0.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1ff8cf1d2896e34343197685f432450ec99a85ba8d90cce2030c5eee2ef98791", size = 10873196, upload-time = "2026-02-17T22:19:07.204Z" },
+ { url = "https://files.pythonhosted.org/packages/48/a9/9301c83d0b47c23ac5deab91c6b39fd98d5b5db4d93b25df8d381451828f/pandas-3.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:eca8b4510f6763f3d37359c2105df03a7a221a508f30e396a51d0713d462e68a", size = 11370859, upload-time = "2026-02-17T22:19:09.436Z" },
+ { url = "https://files.pythonhosted.org/packages/59/fe/0c1fc5bd2d29c7db2ab372330063ad555fb83e08422829c785f5ec2176ca/pandas-3.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:06aff2ad6f0b94a17822cf8b83bbb563b090ed82ff4fe7712db2ce57cd50d9b8", size = 11924584, upload-time = "2026-02-17T22:19:11.562Z" },
+ { url = "https://files.pythonhosted.org/packages/d6/7d/216a1588b65a7aa5f4535570418a599d943c85afb1d95b0876fc00aa1468/pandas-3.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:9fea306c783e28884c29057a1d9baa11a349bbf99538ec1da44c8476563d1b25", size = 9742769, upload-time = "2026-02-17T22:19:13.926Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/cb/810a22a6af9a4e97c8ab1c946b47f3489c5bca5adc483ce0ffc84c9cc768/pandas-3.0.1-cp313-cp313-win_arm64.whl", hash = "sha256:a8d37a43c52917427e897cb2e429f67a449327394396a81034a4449b99afda59", size = 9043855, upload-time = "2026-02-17T22:19:16.09Z" },
+ { url = "https://files.pythonhosted.org/packages/92/fa/423c89086cca1f039cf1253c3ff5b90f157b5b3757314aa635f6bf3e30aa/pandas-3.0.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d54855f04f8246ed7b6fc96b05d4871591143c46c0b6f4af874764ed0d2d6f06", size = 10752673, upload-time = "2026-02-17T22:19:18.304Z" },
+ { url = "https://files.pythonhosted.org/packages/22/23/b5a08ec1f40020397f0faba72f1e2c11f7596a6169c7b3e800abff0e433f/pandas-3.0.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4e1b677accee34a09e0dc2ce5624e4a58a1870ffe56fc021e9caf7f23cd7668f", size = 10404967, upload-time = "2026-02-17T22:19:20.726Z" },
+ { url = "https://files.pythonhosted.org/packages/5c/81/94841f1bb4afdc2b52a99daa895ac2c61600bb72e26525ecc9543d453ebc/pandas-3.0.1-cp313-cp313t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a9cabbdcd03f1b6cd254d6dda8ae09b0252524be1592594c00b7895916cb1324", size = 10320575, upload-time = "2026-02-17T22:19:24.919Z" },
+ { url = "https://files.pythonhosted.org/packages/0a/8b/2ae37d66a5342a83adadfd0cb0b4bf9c3c7925424dd5f40d15d6cfaa35ee/pandas-3.0.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5ae2ab1f166668b41e770650101e7090824fd34d17915dd9cd479f5c5e0065e9", size = 10710921, upload-time = "2026-02-17T22:19:27.181Z" },
+ { url = "https://files.pythonhosted.org/packages/a2/61/772b2e2757855e232b7ccf7cb8079a5711becb3a97f291c953def15a833f/pandas-3.0.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6bf0603c2e30e2cafac32807b06435f28741135cb8697eae8b28c7d492fc7d76", size = 11334191, upload-time = "2026-02-17T22:19:29.411Z" },
+ { url = "https://files.pythonhosted.org/packages/1b/08/b16c6df3ef555d8495d1d265a7963b65be166785d28f06a350913a4fac78/pandas-3.0.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6c426422973973cae1f4a23e51d4ae85974f44871b24844e4f7de752dd877098", size = 11782256, upload-time = "2026-02-17T22:19:32.34Z" },
+ { url = "https://files.pythonhosted.org/packages/55/80/178af0594890dee17e239fca96d3d8670ba0f5ff59b7d0439850924a9c09/pandas-3.0.1-cp313-cp313t-win_amd64.whl", hash = "sha256:b03f91ae8c10a85c1613102c7bef5229b5379f343030a3ccefeca8a33414cf35", size = 10485047, upload-time = "2026-02-17T22:19:34.605Z" },
+ { url = "https://files.pythonhosted.org/packages/bb/8b/4bb774a998b97e6c2fd62a9e6cfdaae133b636fd1c468f92afb4ae9a447a/pandas-3.0.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:99d0f92ed92d3083d140bf6b97774f9f13863924cf3f52a70711f4e7588f9d0a", size = 10322465, upload-time = "2026-02-17T22:19:36.803Z" },
+ { url = "https://files.pythonhosted.org/packages/72/3a/5b39b51c64159f470f1ca3b1c2a87da290657ca022f7cd11442606f607d1/pandas-3.0.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:3b66857e983208654294bb6477b8a63dee26b37bdd0eb34d010556e91261784f", size = 9910632, upload-time = "2026-02-17T22:19:39.001Z" },
+ { url = "https://files.pythonhosted.org/packages/4e/f7/b449ffb3f68c11da12fc06fbf6d2fa3a41c41e17d0284d23a79e1c13a7e4/pandas-3.0.1-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:56cf59638bf24dc9bdf2154c81e248b3289f9a09a6d04e63608c159022352749", size = 10440535, upload-time = "2026-02-17T22:19:41.157Z" },
+ { url = "https://files.pythonhosted.org/packages/55/77/6ea82043db22cb0f2bbfe7198da3544000ddaadb12d26be36e19b03a2dc5/pandas-3.0.1-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c1a9f55e0f46951874b863d1f3906dcb57df2d9be5c5847ba4dfb55b2c815249", size = 10893940, upload-time = "2026-02-17T22:19:43.493Z" },
+ { url = "https://files.pythonhosted.org/packages/03/30/f1b502a72468c89412c1b882a08f6eed8a4ee9dc033f35f65d0663df6081/pandas-3.0.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:1849f0bba9c8a2fb0f691d492b834cc8dadf617e29015c66e989448d58d011ee", size = 11442711, upload-time = "2026-02-17T22:19:46.074Z" },
+ { url = "https://files.pythonhosted.org/packages/0d/f0/ebb6ddd8fc049e98cabac5c2924d14d1dda26a20adb70d41ea2e428d3ec4/pandas-3.0.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c3d288439e11b5325b02ae6e9cc83e6805a62c40c5a6220bea9beb899c073b1c", size = 11963918, upload-time = "2026-02-17T22:19:48.838Z" },
+ { url = "https://files.pythonhosted.org/packages/09/f8/8ce132104074f977f907442790eaae24e27bce3b3b454e82faa3237ff098/pandas-3.0.1-cp314-cp314-win_amd64.whl", hash = "sha256:93325b0fe372d192965f4cca88d97667f49557398bbf94abdda3bf1b591dbe66", size = 9862099, upload-time = "2026-02-17T22:19:51.081Z" },
+ { url = "https://files.pythonhosted.org/packages/e6/b7/6af9aac41ef2456b768ef0ae60acf8abcebb450a52043d030a65b4b7c9bd/pandas-3.0.1-cp314-cp314-win_arm64.whl", hash = "sha256:97ca08674e3287c7148f4858b01136f8bdfe7202ad25ad04fec602dd1d29d132", size = 9185333, upload-time = "2026-02-17T22:19:53.266Z" },
+ { url = "https://files.pythonhosted.org/packages/66/fc/848bb6710bc6061cb0c5badd65b92ff75c81302e0e31e496d00029fe4953/pandas-3.0.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:58eeb1b2e0fb322befcf2bbc9ba0af41e616abadb3d3414a6bc7167f6cbfce32", size = 10772664, upload-time = "2026-02-17T22:19:55.806Z" },
+ { url = "https://files.pythonhosted.org/packages/69/5c/866a9bbd0f79263b4b0db6ec1a341be13a1473323f05c122388e0f15b21d/pandas-3.0.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:cd9af1276b5ca9e298bd79a26bda32fa9cc87ed095b2a9a60978d2ca058eaf87", size = 10421286, upload-time = "2026-02-17T22:19:58.091Z" },
+ { url = "https://files.pythonhosted.org/packages/51/a4/2058fb84fb1cfbfb2d4a6d485e1940bb4ad5716e539d779852494479c580/pandas-3.0.1-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:94f87a04984d6b63788327cd9f79dda62b7f9043909d2440ceccf709249ca988", size = 10342050, upload-time = "2026-02-17T22:20:01.376Z" },
+ { url = "https://files.pythonhosted.org/packages/22/1b/674e89996cc4be74db3c4eb09240c4bb549865c9c3f5d9b086ff8fcfbf00/pandas-3.0.1-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85fe4c4df62e1e20f9db6ebfb88c844b092c22cd5324bdcf94bfa2fc1b391221", size = 10740055, upload-time = "2026-02-17T22:20:04.328Z" },
+ { url = "https://files.pythonhosted.org/packages/d0/f8/e954b750764298c22fa4614376531fe63c521ef517e7059a51f062b87dca/pandas-3.0.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:331ca75a2f8672c365ae25c0b29e46f5ac0c6551fdace8eec4cd65e4fac271ff", size = 11357632, upload-time = "2026-02-17T22:20:06.647Z" },
+ { url = "https://files.pythonhosted.org/packages/6d/02/c6e04b694ffd68568297abd03588b6d30295265176a5c01b7459d3bc35a3/pandas-3.0.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:15860b1fdb1973fffade772fdb931ccf9b2f400a3f5665aef94a00445d7d8dd5", size = 11810974, upload-time = "2026-02-17T22:20:08.946Z" },
+ { url = "https://files.pythonhosted.org/packages/89/41/d7dfb63d2407f12055215070c42fc6ac41b66e90a2946cdc5e759058398b/pandas-3.0.1-cp314-cp314t-win_amd64.whl", hash = "sha256:44f1364411d5670efa692b146c748f4ed013df91ee91e9bec5677fb1fd58b937", size = 10884622, upload-time = "2026-02-17T22:20:11.711Z" },
+ { url = "https://files.pythonhosted.org/packages/68/b0/34937815889fa982613775e4b97fddd13250f11012d769949c5465af2150/pandas-3.0.1-cp314-cp314t-win_arm64.whl", hash = "sha256:108dd1790337a494aa80e38def654ca3f0968cf4f362c85f44c15e471667102d", size = 9452085, upload-time = "2026-02-17T22:20:14.331Z" },
]
[[package]]
@@ -908,11 +925,11 @@ wheels = [
[[package]]
name = "platformdirs"
-version = "4.5.1"
+version = "4.9.4"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/cf/86/0248f086a84f01b37aaec0fa567b397df1a119f73c16f6c7a9aac73ea309/platformdirs-4.5.1.tar.gz", hash = "sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda", size = 21715, upload-time = "2025-12-05T13:52:58.638Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/19/56/8d4c30c8a1d07013911a8fdbd8f89440ef9f08d07a1b50ab8ca8be5a20f9/platformdirs-4.9.4.tar.gz", hash = "sha256:1ec356301b7dc906d83f371c8f487070e99d3ccf9e501686456394622a01a934", size = 28737, upload-time = "2026-03-05T18:34:13.271Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/cb/28/3bfe2fa5a7b9c46fe7e13c97bda14c895fb10fa2ebf1d0abb90e0cea7ee1/platformdirs-4.5.1-py3-none-any.whl", hash = "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31", size = 18731, upload-time = "2025-12-05T13:52:56.823Z" },
+ { url = "https://files.pythonhosted.org/packages/63/d7/97f7e3a6abb67d8080dd406fd4df842c2be0efaf712d1c899c32a075027c/platformdirs-4.9.4-py3-none-any.whl", hash = "sha256:68a9a4619a666ea6439f2ff250c12a853cd1cbd5158d258bd824a7df6be2f868", size = 21216, upload-time = "2026-03-05T18:34:12.172Z" },
]
[[package]]
@@ -1253,6 +1270,19 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" },
]
+[[package]]
+name = "python-discovery"
+version = "1.2.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "filelock" },
+ { name = "platformdirs" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/b9/88/815e53084c5079a59df912825a279f41dd2e0df82281770eadc732f5352c/python_discovery-1.2.1.tar.gz", hash = "sha256:180c4d114bff1c32462537eac5d6a332b768242b76b69c0259c7d14b1b680c9e", size = 58457, upload-time = "2026-03-26T22:30:44.496Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/67/0f/019d3949a40280f6193b62bc010177d4ce702d0fce424322286488569cd3/python_discovery-1.2.1-py3-none-any.whl", hash = "sha256:b6a957b24c1cd79252484d3566d1b49527581d46e789aaf43181005e56201502", size = 31674, upload-time = "2026-03-26T22:30:43.396Z" },
+]
+
[[package]]
name = "pytokens"
version = "0.4.1"
@@ -1330,7 +1360,7 @@ wheels = [
[[package]]
name = "requests"
-version = "2.32.5"
+version = "2.33.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "certifi" },
@@ -1338,9 +1368,9 @@ dependencies = [
{ name = "idna" },
{ name = "urllib3" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/34/64/8860370b167a9721e8956ae116825caff829224fbca0ca6e7bf8ddef8430/requests-2.33.0.tar.gz", hash = "sha256:c7ebc5e8b0f21837386ad0e1c8fe8b829fa5f544d8df3b2253bff14ef29d7652", size = 134232, upload-time = "2026-03-25T15:10:41.586Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" },
+ { url = "https://files.pythonhosted.org/packages/56/5d/c814546c2333ceea4ba42262d8c4d55763003e767fa169adc693bd524478/requests-2.33.0-py3-none-any.whl", hash = "sha256:3324635456fa185245e24865e810cecec7b4caf933d7eb133dcde67d48cee69b", size = 65017, upload-time = "2026-03-25T15:10:40.382Z" },
]
[[package]]
@@ -1537,16 +1567,17 @@ wheels = [
[[package]]
name = "virtualenv"
-version = "20.36.1"
+version = "21.2.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "distlib" },
{ name = "filelock" },
{ name = "platformdirs" },
+ { name = "python-discovery" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/aa/a3/4d310fa5f00863544e1d0f4de93bddec248499ccf97d4791bc3122c9d4f3/virtualenv-20.36.1.tar.gz", hash = "sha256:8befb5c81842c641f8ee658481e42641c68b5eab3521d8e092d18320902466ba", size = 6032239, upload-time = "2026-01-09T18:21:01.296Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/aa/92/58199fe10049f9703c2666e809c4f686c54ef0a68b0f6afccf518c0b1eb9/virtualenv-21.2.0.tar.gz", hash = "sha256:1720dc3a62ef5b443092e3f499228599045d7fea4c79199770499df8becf9098", size = 5840618, upload-time = "2026-03-09T17:24:38.013Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/6a/2a/dc2228b2888f51192c7dc766106cd475f1b768c10caaf9727659726f7391/virtualenv-20.36.1-py3-none-any.whl", hash = "sha256:575a8d6b124ef88f6f51d56d656132389f961062a9177016a50e4f507bbcc19f", size = 6008258, upload-time = "2026-01-09T18:20:59.425Z" },
+ { url = "https://files.pythonhosted.org/packages/c6/59/7d02447a55b2e55755011a647479041bc92a82e143f96a8195cb33bd0a1c/virtualenv-21.2.0-py3-none-any.whl", hash = "sha256:1bd755b504931164a5a496d217c014d098426cddc79363ad66ac78125f9d908f", size = 5825084, upload-time = "2026-03-09T17:24:35.378Z" },
]
[[package]]
From 1f28d730824b6cb721415985194c5f4567e42ea7 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Mon, 16 Mar 2026 20:36:38 +0000
Subject: [PATCH 09/26] feat!: eliminate /files/{file_id} GET differences
---
.stats.yml | 4 ++--
api.md | 9 +++++++--
src/llama_stack_client/resources/files.py | 8 ++++----
src/llama_stack_client/types/__init__.py | 1 +
src/llama_stack_client/types/file.py | 6 +++---
.../types/file_content_response.py | 7 +++++++
tests/api_resources/test_files.py | 12 ++++++------
7 files changed, 30 insertions(+), 17 deletions(-)
create mode 100644 src/llama_stack_client/types/file_content_response.py
diff --git a/.stats.yml b/.stats.yml
index 0924840b..1e2a7c27 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 102
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-514340e257a7c8ccfb8f64403fa1aa5824cd256a774c49c232f539ea7590262f.yml
-openapi_spec_hash: 903087bc571fae827d1c6883580864f8
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-2bfb98a935a0d6db55218ab2e51624d761c70d7b7e4654bc6ac8193e51050bcf.yml
+openapi_spec_hash: 6572ff5543514964b200d06a47558f5d
config_hash: d1db834ac019b3bb2e260c9e37dcb5d1
diff --git a/api.md b/api.md
index 5e6ba72b..d7defc35 100644
--- a/api.md
+++ b/api.md
@@ -409,7 +409,12 @@ Methods:
Types:
```python
-from llama_stack_client.types import DeleteFileResponse, File, ListFilesResponse
+from llama_stack_client.types import (
+ DeleteFileResponse,
+ File,
+ ListFilesResponse,
+ FileContentResponse,
+)
```
Methods:
@@ -418,7 +423,7 @@ Methods:
- client.files.retrieve(file_id) -> File
- client.files.list(\*\*params) -> SyncOpenAICursorPage[File]
- client.files.delete(file_id) -> DeleteFileResponse
-- client.files.content(file_id) -> object
+- client.files.content(file_id) -> str
# Batches
diff --git a/src/llama_stack_client/resources/files.py b/src/llama_stack_client/resources/files.py
index 54a34170..c11b3dce 100644
--- a/src/llama_stack_client/resources/files.py
+++ b/src/llama_stack_client/resources/files.py
@@ -245,7 +245,7 @@ def content(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> object:
+ ) -> str:
"""
Retrieve file content
@@ -267,7 +267,7 @@ def content(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=object,
+ cast_to=str,
)
@@ -484,7 +484,7 @@ async def content(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> object:
+ ) -> str:
"""
Retrieve file content
@@ -506,7 +506,7 @@ async def content(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=object,
+ cast_to=str,
)
diff --git a/src/llama_stack_client/types/__init__.py b/src/llama_stack_client/types/__init__.py
index e8b23d5e..7fee4b11 100644
--- a/src/llama_stack_client/types/__init__.py
+++ b/src/llama_stack_client/types/__init__.py
@@ -57,6 +57,7 @@
from .batch_cancel_response import BatchCancelResponse as BatchCancelResponse
from .batch_create_response import BatchCreateResponse as BatchCreateResponse
from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk
+from .file_content_response import FileContentResponse as FileContentResponse
from .list_prompts_response import ListPromptsResponse as ListPromptsResponse
from .list_shields_response import ListShieldsResponse as ListShieldsResponse
from .model_register_params import ModelRegisterParams as ModelRegisterParams
diff --git a/src/llama_stack_client/types/file.py b/src/llama_stack_client/types/file.py
index d9d106e7..e79a7ea8 100644
--- a/src/llama_stack_client/types/file.py
+++ b/src/llama_stack_client/types/file.py
@@ -26,14 +26,14 @@ class File(BaseModel):
created_at: int
"""The Unix timestamp (in seconds) for when the file was created."""
- expires_at: int
- """The Unix timestamp (in seconds) for when the file expires."""
-
filename: str
"""The name of the file."""
purpose: Literal["assistants", "batch"]
"""The intended purpose of the file."""
+ expires_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the file expires."""
+
object: Optional[Literal["file"]] = None
"""The object type, which is always 'file'."""
diff --git a/src/llama_stack_client/types/file_content_response.py b/src/llama_stack_client/types/file_content_response.py
new file mode 100644
index 00000000..c7f72a75
--- /dev/null
+++ b/src/llama_stack_client/types/file_content_response.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import TypeAlias
+
+__all__ = ["FileContentResponse"]
+
+FileContentResponse: TypeAlias = str
diff --git a/tests/api_resources/test_files.py b/tests/api_resources/test_files.py
index 22ac6d8a..368b492f 100644
--- a/tests/api_resources/test_files.py
+++ b/tests/api_resources/test_files.py
@@ -186,7 +186,7 @@ def test_method_content(self, client: LlamaStackClient) -> None:
file = client.files.content(
"file_id",
)
- assert_matches_type(object, file, path=["response"])
+ assert_matches_type(str, file, path=["response"])
@parametrize
def test_raw_response_content(self, client: LlamaStackClient) -> None:
@@ -197,7 +197,7 @@ def test_raw_response_content(self, client: LlamaStackClient) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
- assert_matches_type(object, file, path=["response"])
+ assert_matches_type(str, file, path=["response"])
@parametrize
def test_streaming_response_content(self, client: LlamaStackClient) -> None:
@@ -208,7 +208,7 @@ def test_streaming_response_content(self, client: LlamaStackClient) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
- assert_matches_type(object, file, path=["response"])
+ assert_matches_type(str, file, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -387,7 +387,7 @@ async def test_method_content(self, async_client: AsyncLlamaStackClient) -> None
file = await async_client.files.content(
"file_id",
)
- assert_matches_type(object, file, path=["response"])
+ assert_matches_type(str, file, path=["response"])
@parametrize
async def test_raw_response_content(self, async_client: AsyncLlamaStackClient) -> None:
@@ -398,7 +398,7 @@ async def test_raw_response_content(self, async_client: AsyncLlamaStackClient) -
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = await response.parse()
- assert_matches_type(object, file, path=["response"])
+ assert_matches_type(str, file, path=["response"])
@parametrize
async def test_streaming_response_content(self, async_client: AsyncLlamaStackClient) -> None:
@@ -409,7 +409,7 @@ async def test_streaming_response_content(self, async_client: AsyncLlamaStackCli
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = await response.parse()
- assert_matches_type(object, file, path=["response"])
+ assert_matches_type(str, file, path=["response"])
assert cast(Any, response.is_closed) is True
From 1df7e2605e78572eccc53aa8db1e44d987106a9b Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Mon, 16 Mar 2026 20:49:06 +0000
Subject: [PATCH 10/26] chore(internal): tweak CI branches
---
.github/workflows/ci.yml | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index e7be7bc5..4dac4bbc 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,12 +1,14 @@
name: CI
on:
push:
- branches-ignore:
- - 'generated'
- - 'codegen/**'
- - 'integrated/**'
- - 'stl-preview-head/**'
- - 'stl-preview-base/**'
+ branches:
+ - '**'
+ - '!integrated/**'
+ - '!stl-preview-head/**'
+ - '!stl-preview-base/**'
+ - '!generated'
+ - '!codegen/**'
+ - 'codegen/stl/**'
pull_request:
branches-ignore:
- 'stl-preview-head/**'
From 94a14dad88ed55d3f2baf1de8eb30ba529fb9818 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 18 Mar 2026 15:11:39 +0000
Subject: [PATCH 11/26] refactor: rename rag-runtime provider to file-search
---
.stats.yml | 4 ++--
src/llama_stack_client/types/query_chunks_response.py | 4 ++--
src/llama_stack_client/types/vector_io_insert_params.py | 4 ++--
src/llama_stack_client/types/vector_store_search_response.py | 4 ++--
.../types/vector_stores/file_content_response.py | 4 ++--
5 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/.stats.yml b/.stats.yml
index 1e2a7c27..c3d0a549 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 102
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-2bfb98a935a0d6db55218ab2e51624d761c70d7b7e4654bc6ac8193e51050bcf.yml
-openapi_spec_hash: 6572ff5543514964b200d06a47558f5d
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-630dfd3e6352479efac56ddb8696a89c8e981c97f7d3056d0c163a95427bca3b.yml
+openapi_spec_hash: 17a085582e81bb2e3ec0abebdb065394
config_hash: d1db834ac019b3bb2e260c9e37dcb5d1
diff --git a/src/llama_stack_client/types/query_chunks_response.py b/src/llama_stack_client/types/query_chunks_response.py
index 38371de1..5eef720d 100644
--- a/src/llama_stack_client/types/query_chunks_response.py
+++ b/src/llama_stack_client/types/query_chunks_response.py
@@ -33,7 +33,7 @@ class ChunkChunkMetadata(BaseModel):
"""
`ChunkMetadata` is backend metadata for a `Chunk` that is used to store additional information about the chunk that
will not be used in the context during inference, but is required for backend functionality. The `ChunkMetadata`
- is set during chunk creation in `MemoryToolRuntimeImpl().insert()`and is not expected to change after.
+ is set during chunk creation in `FileSearchToolRuntimeImpl().insert()`and is not expected to change after.
Use `Chunk.metadata` for metadata that will be used in the context during inference.
"""
@@ -149,7 +149,7 @@ class Chunk(BaseModel):
`ChunkMetadata` is backend metadata for a `Chunk` that is used to store
additional information about the chunk that will not be used in the context
during inference, but is required for backend functionality. The `ChunkMetadata`
- is set during chunk creation in `MemoryToolRuntimeImpl().insert()`and is not
+ is set during chunk creation in `FileSearchToolRuntimeImpl().insert()`and is not
expected to change after. Use `Chunk.metadata` for metadata that will be used in
the context during inference.
"""
diff --git a/src/llama_stack_client/types/vector_io_insert_params.py b/src/llama_stack_client/types/vector_io_insert_params.py
index 72de4f71..cd0e6e37 100644
--- a/src/llama_stack_client/types/vector_io_insert_params.py
+++ b/src/llama_stack_client/types/vector_io_insert_params.py
@@ -43,7 +43,7 @@ class ChunkChunkMetadata(TypedDict, total=False):
"""
`ChunkMetadata` is backend metadata for a `Chunk` that is used to store additional information about the chunk that
will not be used in the context during inference, but is required for backend functionality. The `ChunkMetadata`
- is set during chunk creation in `MemoryToolRuntimeImpl().insert()`and is not expected to change after.
+ is set during chunk creation in `FileSearchToolRuntimeImpl().insert()`and is not expected to change after.
Use `Chunk.metadata` for metadata that will be used in the context during inference.
"""
@@ -156,7 +156,7 @@ class Chunk(TypedDict, total=False):
`ChunkMetadata` is backend metadata for a `Chunk` that is used to store
additional information about the chunk that will not be used in the context
during inference, but is required for backend functionality. The `ChunkMetadata`
- is set during chunk creation in `MemoryToolRuntimeImpl().insert()`and is not
+ is set during chunk creation in `FileSearchToolRuntimeImpl().insert()`and is not
expected to change after. Use `Chunk.metadata` for metadata that will be used in
the context during inference.
"""
diff --git a/src/llama_stack_client/types/vector_store_search_response.py b/src/llama_stack_client/types/vector_store_search_response.py
index 743fe609..5f799ac7 100644
--- a/src/llama_stack_client/types/vector_store_search_response.py
+++ b/src/llama_stack_client/types/vector_store_search_response.py
@@ -18,7 +18,7 @@ class DataContentChunkMetadata(BaseModel):
"""
`ChunkMetadata` is backend metadata for a `Chunk` that is used to store additional information about the chunk that
will not be used in the context during inference, but is required for backend functionality. The `ChunkMetadata`
- is set during chunk creation in `MemoryToolRuntimeImpl().insert()`and is not expected to change after.
+ is set during chunk creation in `FileSearchToolRuntimeImpl().insert()`and is not expected to change after.
Use `Chunk.metadata` for metadata that will be used in the context during inference.
"""
@@ -53,7 +53,7 @@ class DataContent(BaseModel):
`ChunkMetadata` is backend metadata for a `Chunk` that is used to store
additional information about the chunk that will not be used in the context
during inference, but is required for backend functionality. The `ChunkMetadata`
- is set during chunk creation in `MemoryToolRuntimeImpl().insert()`and is not
+ is set during chunk creation in `FileSearchToolRuntimeImpl().insert()`and is not
expected to change after. Use `Chunk.metadata` for metadata that will be used in
the context during inference.
"""
diff --git a/src/llama_stack_client/types/vector_stores/file_content_response.py b/src/llama_stack_client/types/vector_stores/file_content_response.py
index a489860f..3c1393c6 100644
--- a/src/llama_stack_client/types/vector_stores/file_content_response.py
+++ b/src/llama_stack_client/types/vector_stores/file_content_response.py
@@ -18,7 +18,7 @@ class DataChunkMetadata(BaseModel):
"""
`ChunkMetadata` is backend metadata for a `Chunk` that is used to store additional information about the chunk that
will not be used in the context during inference, but is required for backend functionality. The `ChunkMetadata`
- is set during chunk creation in `MemoryToolRuntimeImpl().insert()`and is not expected to change after.
+ is set during chunk creation in `FileSearchToolRuntimeImpl().insert()`and is not expected to change after.
Use `Chunk.metadata` for metadata that will be used in the context during inference.
"""
@@ -53,7 +53,7 @@ class Data(BaseModel):
`ChunkMetadata` is backend metadata for a `Chunk` that is used to store
additional information about the chunk that will not be used in the context
during inference, but is required for backend functionality. The `ChunkMetadata`
- is set during chunk creation in `MemoryToolRuntimeImpl().insert()`and is not
+ is set during chunk creation in `FileSearchToolRuntimeImpl().insert()`and is not
expected to change after. Use `Chunk.metadata` for metadata that will be used in
the context during inference.
"""
From 9b288d553ae83860fbe1d8ee9352532ed04ddd9b Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Sat, 28 Mar 2026 00:00:09 +0000
Subject: [PATCH 12/26] fix: sanitize endpoint path params
---
src/llama_stack_client/_utils/_path.py | 133 ++++++++++++++++++
.../resources/alpha/admin.py | 6 +-
.../resources/alpha/benchmarks.py | 10 +-
.../resources/alpha/eval/eval.py | 18 +--
.../resources/alpha/eval/jobs.py | 25 +++-
src/llama_stack_client/resources/batches.py | 10 +-
.../resources/beta/datasets.py | 18 +--
.../resources/chat/completions.py | 6 +-
.../resources/conversations/conversations.py | 14 +-
.../resources/conversations/items.py | 30 ++--
src/llama_stack_client/resources/files.py | 14 +-
.../resources/models/models.py | 10 +-
.../resources/prompts/prompts.py | 18 +--
.../resources/prompts/versions.py | 5 +-
src/llama_stack_client/resources/providers.py | 5 +-
.../resources/responses/input_items.py | 6 +-
.../resources/responses/responses.py | 10 +-
.../resources/scoring_functions.py | 10 +-
src/llama_stack_client/resources/shields.py | 10 +-
.../resources/toolgroups.py | 10 +-
src/llama_stack_client/resources/tools.py | 6 +-
.../resources/vector_stores/file_batches.py | 42 ++++--
.../resources/vector_stores/files.py | 46 ++++--
.../resources/vector_stores/vector_stores.py | 18 +--
.../types/file_content_response.py | 6 +
tests/test_utils/test_path.py | 95 +++++++++++++
26 files changed, 443 insertions(+), 138 deletions(-)
create mode 100644 src/llama_stack_client/_utils/_path.py
create mode 100644 tests/test_utils/test_path.py
diff --git a/src/llama_stack_client/_utils/_path.py b/src/llama_stack_client/_utils/_path.py
new file mode 100644
index 00000000..2859bdec
--- /dev/null
+++ b/src/llama_stack_client/_utils/_path.py
@@ -0,0 +1,133 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from __future__ import annotations
+
+import re
+from typing import (
+ Any,
+ Mapping,
+ Callable,
+)
+from urllib.parse import quote
+
+# Matches '.' or '..' where each dot is either literal or percent-encoded (%2e / %2E).
+_DOT_SEGMENT_RE = re.compile(r"^(?:\.|%2[eE]){1,2}$")
+
+_PLACEHOLDER_RE = re.compile(r"\{(\w+)\}")
+
+
+def _quote_path_segment_part(value: str) -> str:
+ """Percent-encode `value` for use in a URI path segment.
+
+ Considers characters not in `pchar` set from RFC 3986 §3.3 to be unsafe.
+ https://datatracker.ietf.org/doc/html/rfc3986#section-3.3
+ """
+ # quote() already treats unreserved characters (letters, digits, and -._~)
+ # as safe, so we only need to add sub-delims, ':', and '@'.
+ # Notably, unlike the default `safe` for quote(), / is unsafe and must be quoted.
+ return quote(value, safe="!$&'()*+,;=:@")
+
+
+def _quote_query_part(value: str) -> str:
+ """Percent-encode `value` for use in a URI query string.
+
+ Considers &, = and characters not in `query` set from RFC 3986 §3.4 to be unsafe.
+ https://datatracker.ietf.org/doc/html/rfc3986#section-3.4
+ """
+ return quote(value, safe="!$'()*+,;:@/?")
+
+
+def _quote_fragment_part(value: str) -> str:
+ """Percent-encode `value` for use in a URI fragment.
+
+ Considers characters not in `fragment` set from RFC 3986 §3.5 to be unsafe.
+ https://datatracker.ietf.org/doc/html/rfc3986#section-3.5
+ """
+ return quote(value, safe="!$&'()*+,;=:@/?")
+
+
+def _interpolate(
+ template: str,
+ values: Mapping[str, Any],
+ quoter: Callable[[str], str],
+) -> str:
+ """Replace {name} placeholders in `template`, quoting each value with `quoter`.
+
+ Placeholder names are looked up in `values`.
+
+ Raises:
+ KeyError: If a placeholder is not found in `values`.
+ """
+ # re.split with a capturing group returns alternating
+ # [text, name, text, name, ..., text] elements.
+ parts = _PLACEHOLDER_RE.split(template)
+
+ for i in range(1, len(parts), 2):
+ name = parts[i]
+ if name not in values:
+ raise KeyError(f"a value for placeholder {{{name}}} was not provided")
+ val = values[name]
+ if val is None:
+ parts[i] = "null"
+ elif isinstance(val, bool):
+ parts[i] = "true" if val else "false"
+ else:
+ parts[i] = quoter(str(values[name]))
+
+ return "".join(parts)
+
+
+def path_template(template: str, /, **kwargs: Any) -> str:
+ """Interpolate {name} placeholders in `template` from keyword arguments.
+
+ Args:
+ template: The template string containing {name} placeholders.
+ **kwargs: Keyword arguments to interpolate into the template.
+
+ Returns:
+ The template with placeholders interpolated and percent-encoded.
+
+ Safe characters for percent-encoding are dependent on the URI component.
+ Placeholders in path and fragment portions are percent-encoded where the `segment`
+ and `fragment` sets from RFC 3986 respectively are considered safe.
+ Placeholders in the query portion are percent-encoded where the `query` set from
+ RFC 3986 §3.3 is considered safe except for = and & characters.
+
+ Raises:
+ KeyError: If a placeholder is not found in `kwargs`.
+ ValueError: If resulting path contains /./ or /../ segments (including percent-encoded dot-segments).
+ """
+ # Split the template into path, query, and fragment portions.
+ fragment_template: str | None = None
+ query_template: str | None = None
+
+ rest = template
+ if "#" in rest:
+ rest, fragment_template = rest.split("#", 1)
+ if "?" in rest:
+ rest, query_template = rest.split("?", 1)
+ path_template = rest
+
+ # Interpolate each portion with the appropriate quoting rules.
+ path_result = _interpolate(path_template, kwargs, _quote_path_segment_part)
+
+ # Reject dot-segments (. and ..) in the final assembled path. The check
+ # runs after interpolation so that adjacent placeholders or a mix of static
+ # text and placeholders that together form a dot-segment are caught.
+ # Also reject percent-encoded dot-segments to protect against incorrectly
+ # implemented normalization in servers/proxies.
+ for segment in path_result.split("/"):
+ if _DOT_SEGMENT_RE.match(segment):
+ raise ValueError(f"Constructed path {path_result!r} contains dot-segment {segment!r} which is not allowed")
+
+ result = path_result
+ if query_template is not None:
+ result += "?" + _interpolate(query_template, kwargs, _quote_query_part)
+ if fragment_template is not None:
+ result += "#" + _interpolate(fragment_template, kwargs, _quote_fragment_part)
+
+ return result
diff --git a/src/llama_stack_client/resources/alpha/admin.py b/src/llama_stack_client/resources/alpha/admin.py
index f84fa640..766181c6 100644
--- a/src/llama_stack_client/resources/alpha/admin.py
+++ b/src/llama_stack_client/resources/alpha/admin.py
@@ -14,7 +14,7 @@
import httpx
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ..._utils import maybe_transform, async_maybe_transform
+from ..._utils import path_template, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -102,7 +102,7 @@ def inspect_provider(
if not provider_id:
raise ValueError(f"Expected a non-empty value for `provider_id` but received {provider_id!r}")
return self._get(
- f"/v1alpha/admin/providers/{provider_id}",
+ path_template("/v1alpha/admin/providers/{provider_id}", provider_id=provider_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -260,7 +260,7 @@ async def inspect_provider(
if not provider_id:
raise ValueError(f"Expected a non-empty value for `provider_id` but received {provider_id!r}")
return await self._get(
- f"/v1alpha/admin/providers/{provider_id}",
+ path_template("/v1alpha/admin/providers/{provider_id}", provider_id=provider_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/llama_stack_client/resources/alpha/benchmarks.py b/src/llama_stack_client/resources/alpha/benchmarks.py
index 8cfb3a1b..9778fac1 100644
--- a/src/llama_stack_client/resources/alpha/benchmarks.py
+++ b/src/llama_stack_client/resources/alpha/benchmarks.py
@@ -14,7 +14,7 @@
import httpx
from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given
-from ..._utils import maybe_transform, async_maybe_transform
+from ..._utils import path_template, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -80,7 +80,7 @@ def retrieve(
if not benchmark_id:
raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
return self._get(
- f"/v1alpha/eval/benchmarks/{benchmark_id}",
+ path_template("/v1alpha/eval/benchmarks/{benchmark_id}", benchmark_id=benchmark_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -201,7 +201,7 @@ def unregister(
raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._delete(
- f"/v1alpha/eval/benchmarks/{benchmark_id}",
+ path_template("/v1alpha/eval/benchmarks/{benchmark_id}", benchmark_id=benchmark_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -257,7 +257,7 @@ async def retrieve(
if not benchmark_id:
raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
return await self._get(
- f"/v1alpha/eval/benchmarks/{benchmark_id}",
+ path_template("/v1alpha/eval/benchmarks/{benchmark_id}", benchmark_id=benchmark_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -378,7 +378,7 @@ async def unregister(
raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._delete(
- f"/v1alpha/eval/benchmarks/{benchmark_id}",
+ path_template("/v1alpha/eval/benchmarks/{benchmark_id}", benchmark_id=benchmark_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/llama_stack_client/resources/alpha/eval/eval.py b/src/llama_stack_client/resources/alpha/eval/eval.py
index 97b45674..7488bef9 100644
--- a/src/llama_stack_client/resources/alpha/eval/eval.py
+++ b/src/llama_stack_client/resources/alpha/eval/eval.py
@@ -21,7 +21,7 @@
AsyncJobsResourceWithStreamingResponse,
)
from ...._types import Body, Query, Headers, NotGiven, SequenceNotStr, not_given
-from ...._utils import maybe_transform, async_maybe_transform
+from ...._utils import path_template, maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import (
@@ -112,7 +112,7 @@ def evaluate_rows(
if not benchmark_id:
raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
return self._post(
- f"/v1alpha/eval/benchmarks/{benchmark_id}/evaluations",
+ path_template("/v1alpha/eval/benchmarks/{benchmark_id}/evaluations", benchmark_id=benchmark_id),
body=maybe_transform(
{
"benchmark_config": benchmark_config,
@@ -164,7 +164,7 @@ def evaluate_rows_alpha(
if not benchmark_id:
raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
return self._post(
- f"/v1alpha/eval/benchmarks/{benchmark_id}/evaluations",
+ path_template("/v1alpha/eval/benchmarks/{benchmark_id}/evaluations", benchmark_id=benchmark_id),
body=maybe_transform(
{
"benchmark_config": benchmark_config,
@@ -210,7 +210,7 @@ def run_eval(
if not benchmark_id:
raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
return self._post(
- f"/v1alpha/eval/benchmarks/{benchmark_id}/jobs",
+ path_template("/v1alpha/eval/benchmarks/{benchmark_id}/jobs", benchmark_id=benchmark_id),
body=maybe_transform({"benchmark_config": benchmark_config}, eval_run_eval_params.EvalRunEvalParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -249,7 +249,7 @@ def run_eval_alpha(
if not benchmark_id:
raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
return self._post(
- f"/v1alpha/eval/benchmarks/{benchmark_id}/jobs",
+ path_template("/v1alpha/eval/benchmarks/{benchmark_id}/jobs", benchmark_id=benchmark_id),
body=maybe_transform(
{"benchmark_config": benchmark_config}, eval_run_eval_alpha_params.EvalRunEvalAlphaParams
),
@@ -328,7 +328,7 @@ async def evaluate_rows(
if not benchmark_id:
raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
return await self._post(
- f"/v1alpha/eval/benchmarks/{benchmark_id}/evaluations",
+ path_template("/v1alpha/eval/benchmarks/{benchmark_id}/evaluations", benchmark_id=benchmark_id),
body=await async_maybe_transform(
{
"benchmark_config": benchmark_config,
@@ -380,7 +380,7 @@ async def evaluate_rows_alpha(
if not benchmark_id:
raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
return await self._post(
- f"/v1alpha/eval/benchmarks/{benchmark_id}/evaluations",
+ path_template("/v1alpha/eval/benchmarks/{benchmark_id}/evaluations", benchmark_id=benchmark_id),
body=await async_maybe_transform(
{
"benchmark_config": benchmark_config,
@@ -426,7 +426,7 @@ async def run_eval(
if not benchmark_id:
raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
return await self._post(
- f"/v1alpha/eval/benchmarks/{benchmark_id}/jobs",
+ path_template("/v1alpha/eval/benchmarks/{benchmark_id}/jobs", benchmark_id=benchmark_id),
body=await async_maybe_transform(
{"benchmark_config": benchmark_config}, eval_run_eval_params.EvalRunEvalParams
),
@@ -467,7 +467,7 @@ async def run_eval_alpha(
if not benchmark_id:
raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
return await self._post(
- f"/v1alpha/eval/benchmarks/{benchmark_id}/jobs",
+ path_template("/v1alpha/eval/benchmarks/{benchmark_id}/jobs", benchmark_id=benchmark_id),
body=await async_maybe_transform(
{"benchmark_config": benchmark_config}, eval_run_eval_alpha_params.EvalRunEvalAlphaParams
),
diff --git a/src/llama_stack_client/resources/alpha/eval/jobs.py b/src/llama_stack_client/resources/alpha/eval/jobs.py
index b13c74c0..06b59cf3 100644
--- a/src/llama_stack_client/resources/alpha/eval/jobs.py
+++ b/src/llama_stack_client/resources/alpha/eval/jobs.py
@@ -11,6 +11,7 @@
import httpx
from ...._types import Body, Query, Headers, NoneType, NotGiven, not_given
+from ...._utils import path_template
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import (
@@ -79,7 +80,9 @@ def retrieve(
if not job_id:
raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}")
return self._get(
- f"/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result",
+ path_template(
+ "/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result", benchmark_id=benchmark_id, job_id=job_id
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -116,7 +119,9 @@ def cancel(
raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._delete(
- f"/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}",
+ path_template(
+ "/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}", benchmark_id=benchmark_id, job_id=job_id
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -152,7 +157,9 @@ def status(
if not job_id:
raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}")
return self._get(
- f"/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}",
+ path_template(
+ "/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}", benchmark_id=benchmark_id, job_id=job_id
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -213,7 +220,9 @@ async def retrieve(
if not job_id:
raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}")
return await self._get(
- f"/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result",
+ path_template(
+ "/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result", benchmark_id=benchmark_id, job_id=job_id
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -250,7 +259,9 @@ async def cancel(
raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._delete(
- f"/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}",
+ path_template(
+ "/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}", benchmark_id=benchmark_id, job_id=job_id
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -286,7 +297,9 @@ async def status(
if not job_id:
raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}")
return await self._get(
- f"/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}",
+ path_template(
+ "/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}", benchmark_id=benchmark_id, job_id=job_id
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/llama_stack_client/resources/batches.py b/src/llama_stack_client/resources/batches.py
index 069c5f64..fd6e6040 100644
--- a/src/llama_stack_client/resources/batches.py
+++ b/src/llama_stack_client/resources/batches.py
@@ -15,7 +15,7 @@
from ..types import batch_list_params, batch_create_params
from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from .._utils import maybe_transform, async_maybe_transform
+from .._utils import path_template, maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -146,7 +146,7 @@ def retrieve(
if not batch_id:
raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
return self._get(
- f"/v1/batches/{batch_id}",
+ path_template("/v1/batches/{batch_id}", batch_id=batch_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -228,7 +228,7 @@ def cancel(
if not batch_id:
raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
return self._post(
- f"/v1/batches/{batch_id}/cancel",
+ path_template("/v1/batches/{batch_id}/cancel", batch_id=batch_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -348,7 +348,7 @@ async def retrieve(
if not batch_id:
raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
return await self._get(
- f"/v1/batches/{batch_id}",
+ path_template("/v1/batches/{batch_id}", batch_id=batch_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -430,7 +430,7 @@ async def cancel(
if not batch_id:
raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
return await self._post(
- f"/v1/batches/{batch_id}/cancel",
+ path_template("/v1/batches/{batch_id}/cancel", batch_id=batch_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/llama_stack_client/resources/beta/datasets.py b/src/llama_stack_client/resources/beta/datasets.py
index ed6dd256..e18e8f0e 100644
--- a/src/llama_stack_client/resources/beta/datasets.py
+++ b/src/llama_stack_client/resources/beta/datasets.py
@@ -15,7 +15,7 @@
import httpx
from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
-from ..._utils import maybe_transform, async_maybe_transform
+from ..._utils import path_template, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -83,7 +83,7 @@ def retrieve(
if not dataset_id:
raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
return self._get(
- f"/v1beta/datasets/{dataset_id}",
+ path_template("/v1beta/datasets/{dataset_id}", dataset_id=dataset_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -145,7 +145,7 @@ def appendrows(
raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._post(
- f"/v1beta/datasetio/append-rows/{dataset_id}",
+ path_template("/v1beta/datasetio/append-rows/{dataset_id}", dataset_id=dataset_id),
body=maybe_transform({"rows": rows}, dataset_appendrows_params.DatasetAppendrowsParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -197,7 +197,7 @@ def iterrows(
if not dataset_id:
raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
return self._get(
- f"/v1beta/datasetio/iterrows/{dataset_id}",
+ path_template("/v1beta/datasetio/iterrows/{dataset_id}", dataset_id=dataset_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -296,7 +296,7 @@ def unregister(
raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._delete(
- f"/v1beta/datasets/{dataset_id}",
+ path_template("/v1beta/datasets/{dataset_id}", dataset_id=dataset_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -352,7 +352,7 @@ async def retrieve(
if not dataset_id:
raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
return await self._get(
- f"/v1beta/datasets/{dataset_id}",
+ path_template("/v1beta/datasets/{dataset_id}", dataset_id=dataset_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -414,7 +414,7 @@ async def appendrows(
raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._post(
- f"/v1beta/datasetio/append-rows/{dataset_id}",
+ path_template("/v1beta/datasetio/append-rows/{dataset_id}", dataset_id=dataset_id),
body=await async_maybe_transform({"rows": rows}, dataset_appendrows_params.DatasetAppendrowsParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -466,7 +466,7 @@ async def iterrows(
if not dataset_id:
raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
return await self._get(
- f"/v1beta/datasetio/iterrows/{dataset_id}",
+ path_template("/v1beta/datasetio/iterrows/{dataset_id}", dataset_id=dataset_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -565,7 +565,7 @@ async def unregister(
raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._delete(
- f"/v1beta/datasets/{dataset_id}",
+ path_template("/v1beta/datasets/{dataset_id}", dataset_id=dataset_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/llama_stack_client/resources/chat/completions.py b/src/llama_stack_client/resources/chat/completions.py
index 6b479871..182c8517 100644
--- a/src/llama_stack_client/resources/chat/completions.py
+++ b/src/llama_stack_client/resources/chat/completions.py
@@ -14,7 +14,7 @@
import httpx
from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
-from ..._utils import required_args, maybe_transform, async_maybe_transform
+from ..._utils import path_template, required_args, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -494,7 +494,7 @@ def retrieve(
if not completion_id:
raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
return self._get(
- f"/v1/chat/completions/{completion_id}",
+ path_template("/v1/chat/completions/{completion_id}", completion_id=completion_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -1016,7 +1016,7 @@ async def retrieve(
if not completion_id:
raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
return await self._get(
- f"/v1/chat/completions/{completion_id}",
+ path_template("/v1/chat/completions/{completion_id}", completion_id=completion_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/llama_stack_client/resources/conversations/conversations.py b/src/llama_stack_client/resources/conversations/conversations.py
index 099cbe15..d363e963 100644
--- a/src/llama_stack_client/resources/conversations/conversations.py
+++ b/src/llama_stack_client/resources/conversations/conversations.py
@@ -22,7 +22,7 @@
)
from ...types import conversation_create_params, conversation_update_params
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ..._utils import maybe_transform, async_maybe_transform
+from ..._utils import path_template, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -136,7 +136,7 @@ def retrieve(
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return self._get(
- f"/v1/conversations/{conversation_id}",
+ path_template("/v1/conversations/{conversation_id}", conversation_id=conversation_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -174,7 +174,7 @@ def update(
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return self._post(
- f"/v1/conversations/{conversation_id}",
+ path_template("/v1/conversations/{conversation_id}", conversation_id=conversation_id),
body=maybe_transform({"metadata": metadata}, conversation_update_params.ConversationUpdateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -210,7 +210,7 @@ def delete(
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return self._delete(
- f"/v1/conversations/{conversation_id}",
+ path_template("/v1/conversations/{conversation_id}", conversation_id=conversation_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -316,7 +316,7 @@ async def retrieve(
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return await self._get(
- f"/v1/conversations/{conversation_id}",
+ path_template("/v1/conversations/{conversation_id}", conversation_id=conversation_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -354,7 +354,7 @@ async def update(
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return await self._post(
- f"/v1/conversations/{conversation_id}",
+ path_template("/v1/conversations/{conversation_id}", conversation_id=conversation_id),
body=await async_maybe_transform(
{"metadata": metadata}, conversation_update_params.ConversationUpdateParams
),
@@ -392,7 +392,7 @@ async def delete(
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return await self._delete(
- f"/v1/conversations/{conversation_id}",
+ path_template("/v1/conversations/{conversation_id}", conversation_id=conversation_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/llama_stack_client/resources/conversations/items.py b/src/llama_stack_client/resources/conversations/items.py
index cccc1850..c337272a 100644
--- a/src/llama_stack_client/resources/conversations/items.py
+++ b/src/llama_stack_client/resources/conversations/items.py
@@ -14,7 +14,7 @@
import httpx
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ..._utils import maybe_transform, async_maybe_transform
+from ..._utils import path_template, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -88,7 +88,7 @@ def create(
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return self._post(
- f"/v1/conversations/{conversation_id}/items",
+ path_template("/v1/conversations/{conversation_id}/items", conversation_id=conversation_id),
body=maybe_transform({"items": items}, item_create_params.ItemCreateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -141,7 +141,7 @@ def list(
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return self._get_api_list(
- f"/v1/conversations/{conversation_id}/items",
+ path_template("/v1/conversations/{conversation_id}/items", conversation_id=conversation_id),
page=SyncOpenAICursorPage[ItemListResponse],
options=make_request_options(
extra_headers=extra_headers,
@@ -194,7 +194,9 @@ def delete(
if not item_id:
raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
return self._delete(
- f"/v1/conversations/{conversation_id}/items/{item_id}",
+ path_template(
+ "/v1/conversations/{conversation_id}/items/{item_id}", conversation_id=conversation_id, item_id=item_id
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -236,7 +238,11 @@ def get(
return cast(
ItemGetResponse,
self._get(
- f"/v1/conversations/{conversation_id}/items/{item_id}",
+ path_template(
+ "/v1/conversations/{conversation_id}/items/{item_id}",
+ conversation_id=conversation_id,
+ item_id=item_id,
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -299,7 +305,7 @@ async def create(
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return await self._post(
- f"/v1/conversations/{conversation_id}/items",
+ path_template("/v1/conversations/{conversation_id}/items", conversation_id=conversation_id),
body=await async_maybe_transform({"items": items}, item_create_params.ItemCreateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -352,7 +358,7 @@ def list(
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return self._get_api_list(
- f"/v1/conversations/{conversation_id}/items",
+ path_template("/v1/conversations/{conversation_id}/items", conversation_id=conversation_id),
page=AsyncOpenAICursorPage[ItemListResponse],
options=make_request_options(
extra_headers=extra_headers,
@@ -405,7 +411,9 @@ async def delete(
if not item_id:
raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
return await self._delete(
- f"/v1/conversations/{conversation_id}/items/{item_id}",
+ path_template(
+ "/v1/conversations/{conversation_id}/items/{item_id}", conversation_id=conversation_id, item_id=item_id
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -447,7 +455,11 @@ async def get(
return cast(
ItemGetResponse,
await self._get(
- f"/v1/conversations/{conversation_id}/items/{item_id}",
+ path_template(
+ "/v1/conversations/{conversation_id}/items/{item_id}",
+ conversation_id=conversation_id,
+ item_id=item_id,
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/llama_stack_client/resources/files.py b/src/llama_stack_client/resources/files.py
index c11b3dce..27df58c7 100644
--- a/src/llama_stack_client/resources/files.py
+++ b/src/llama_stack_client/resources/files.py
@@ -15,7 +15,7 @@
from ..types import file_list_params, file_create_params
from .._types import Body, Omit, Query, Headers, NotGiven, FileTypes, omit, not_given
-from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
+from .._utils import extract_files, path_template, maybe_transform, deepcopy_minimal, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -137,7 +137,7 @@ def retrieve(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return self._get(
- f"/v1/files/{file_id}",
+ path_template("/v1/files/{file_id}", file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -228,7 +228,7 @@ def delete(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return self._delete(
- f"/v1/files/{file_id}",
+ path_template("/v1/files/{file_id}", file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -263,7 +263,7 @@ def content(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return self._get(
- f"/v1/files/{file_id}/content",
+ path_template("/v1/files/{file_id}/content", file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -376,7 +376,7 @@ async def retrieve(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return await self._get(
- f"/v1/files/{file_id}",
+ path_template("/v1/files/{file_id}", file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -467,7 +467,7 @@ async def delete(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return await self._delete(
- f"/v1/files/{file_id}",
+ path_template("/v1/files/{file_id}", file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -502,7 +502,7 @@ async def content(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return await self._get(
- f"/v1/files/{file_id}/content",
+ path_template("/v1/files/{file_id}/content", file_id=file_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/llama_stack_client/resources/models/models.py b/src/llama_stack_client/resources/models/models.py
index fbf291ca..3fd88948 100644
--- a/src/llama_stack_client/resources/models/models.py
+++ b/src/llama_stack_client/resources/models/models.py
@@ -24,7 +24,7 @@
)
from ...types import model_register_params
from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
-from ..._utils import maybe_transform, async_maybe_transform
+from ..._utils import path_template, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -94,7 +94,7 @@ def retrieve(
if not model_id:
raise ValueError(f"Expected a non-empty value for `model_id` but received {model_id!r}")
return self._get(
- f"/v1/models/{model_id}",
+ path_template("/v1/models/{model_id}", model_id=model_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -216,7 +216,7 @@ def unregister(
raise ValueError(f"Expected a non-empty value for `model_id` but received {model_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._delete(
- f"/v1/models/{model_id}",
+ path_template("/v1/models/{model_id}", model_id=model_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -276,7 +276,7 @@ async def retrieve(
if not model_id:
raise ValueError(f"Expected a non-empty value for `model_id` but received {model_id!r}")
return await self._get(
- f"/v1/models/{model_id}",
+ path_template("/v1/models/{model_id}", model_id=model_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -398,7 +398,7 @@ async def unregister(
raise ValueError(f"Expected a non-empty value for `model_id` but received {model_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._delete(
- f"/v1/models/{model_id}",
+ path_template("/v1/models/{model_id}", model_id=model_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/llama_stack_client/resources/prompts/prompts.py b/src/llama_stack_client/resources/prompts/prompts.py
index 0505ba9f..85fc691f 100644
--- a/src/llama_stack_client/resources/prompts/prompts.py
+++ b/src/llama_stack_client/resources/prompts/prompts.py
@@ -19,7 +19,7 @@
prompt_set_default_version_params,
)
from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given
-from ..._utils import maybe_transform, async_maybe_transform
+from ..._utils import path_template, maybe_transform, async_maybe_transform
from .versions import (
VersionsResource,
AsyncVersionsResource,
@@ -145,7 +145,7 @@ def retrieve(
if not prompt_id:
raise ValueError(f"Expected a non-empty value for `prompt_id` but received {prompt_id!r}")
return self._get(
- f"/v1/prompts/{prompt_id}",
+ path_template("/v1/prompts/{prompt_id}", prompt_id=prompt_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -196,7 +196,7 @@ def update(
if not prompt_id:
raise ValueError(f"Expected a non-empty value for `prompt_id` but received {prompt_id!r}")
return self._put(
- f"/v1/prompts/{prompt_id}",
+ path_template("/v1/prompts/{prompt_id}", prompt_id=prompt_id),
body=maybe_transform(
{
"prompt": prompt,
@@ -264,7 +264,7 @@ def delete(
raise ValueError(f"Expected a non-empty value for `prompt_id` but received {prompt_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._delete(
- f"/v1/prompts/{prompt_id}",
+ path_template("/v1/prompts/{prompt_id}", prompt_id=prompt_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -302,7 +302,7 @@ def set_default_version(
if not prompt_id:
raise ValueError(f"Expected a non-empty value for `prompt_id` but received {prompt_id!r}")
return self._put(
- f"/v1/prompts/{prompt_id}/set-default-version",
+ path_template("/v1/prompts/{prompt_id}/set-default-version", prompt_id=prompt_id),
body=maybe_transform({"version": version}, prompt_set_default_version_params.PromptSetDefaultVersionParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -412,7 +412,7 @@ async def retrieve(
if not prompt_id:
raise ValueError(f"Expected a non-empty value for `prompt_id` but received {prompt_id!r}")
return await self._get(
- f"/v1/prompts/{prompt_id}",
+ path_template("/v1/prompts/{prompt_id}", prompt_id=prompt_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -463,7 +463,7 @@ async def update(
if not prompt_id:
raise ValueError(f"Expected a non-empty value for `prompt_id` but received {prompt_id!r}")
return await self._put(
- f"/v1/prompts/{prompt_id}",
+ path_template("/v1/prompts/{prompt_id}", prompt_id=prompt_id),
body=await async_maybe_transform(
{
"prompt": prompt,
@@ -531,7 +531,7 @@ async def delete(
raise ValueError(f"Expected a non-empty value for `prompt_id` but received {prompt_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._delete(
- f"/v1/prompts/{prompt_id}",
+ path_template("/v1/prompts/{prompt_id}", prompt_id=prompt_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -569,7 +569,7 @@ async def set_default_version(
if not prompt_id:
raise ValueError(f"Expected a non-empty value for `prompt_id` but received {prompt_id!r}")
return await self._put(
- f"/v1/prompts/{prompt_id}/set-default-version",
+ path_template("/v1/prompts/{prompt_id}/set-default-version", prompt_id=prompt_id),
body=await async_maybe_transform(
{"version": version}, prompt_set_default_version_params.PromptSetDefaultVersionParams
),
diff --git a/src/llama_stack_client/resources/prompts/versions.py b/src/llama_stack_client/resources/prompts/versions.py
index ca74fa31..a6dbc0c7 100644
--- a/src/llama_stack_client/resources/prompts/versions.py
+++ b/src/llama_stack_client/resources/prompts/versions.py
@@ -13,6 +13,7 @@
import httpx
from ..._types import Body, Query, Headers, NotGiven, not_given
+from ..._utils import path_template
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -78,7 +79,7 @@ def list(
if not prompt_id:
raise ValueError(f"Expected a non-empty value for `prompt_id` but received {prompt_id!r}")
return self._get(
- f"/v1/prompts/{prompt_id}/versions",
+ path_template("/v1/prompts/{prompt_id}/versions", prompt_id=prompt_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -140,7 +141,7 @@ async def list(
if not prompt_id:
raise ValueError(f"Expected a non-empty value for `prompt_id` but received {prompt_id!r}")
return await self._get(
- f"/v1/prompts/{prompt_id}/versions",
+ path_template("/v1/prompts/{prompt_id}/versions", prompt_id=prompt_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
diff --git a/src/llama_stack_client/resources/providers.py b/src/llama_stack_client/resources/providers.py
index 9ed42e38..0701883b 100644
--- a/src/llama_stack_client/resources/providers.py
+++ b/src/llama_stack_client/resources/providers.py
@@ -13,6 +13,7 @@
import httpx
from .._types import Body, Query, Headers, NotGiven, not_given
+from .._utils import path_template
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -81,7 +82,7 @@ def retrieve(
if not provider_id:
raise ValueError(f"Expected a non-empty value for `provider_id` but received {provider_id!r}")
return self._get(
- f"/v1/providers/{provider_id}",
+ path_template("/v1/providers/{provider_id}", provider_id=provider_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -164,7 +165,7 @@ async def retrieve(
if not provider_id:
raise ValueError(f"Expected a non-empty value for `provider_id` but received {provider_id!r}")
return await self._get(
- f"/v1/providers/{provider_id}",
+ path_template("/v1/providers/{provider_id}", provider_id=provider_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/llama_stack_client/resources/responses/input_items.py b/src/llama_stack_client/resources/responses/input_items.py
index 113c6a8a..3daa5441 100644
--- a/src/llama_stack_client/resources/responses/input_items.py
+++ b/src/llama_stack_client/resources/responses/input_items.py
@@ -14,7 +14,7 @@
import httpx
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ..._utils import maybe_transform, async_maybe_transform
+from ..._utils import path_template, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -109,7 +109,7 @@ def list(
if not response_id:
raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
return self._get(
- f"/v1/responses/{response_id}/input_items",
+ path_template("/v1/responses/{response_id}/input_items", response_id=response_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -209,7 +209,7 @@ async def list(
if not response_id:
raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
return await self._get(
- f"/v1/responses/{response_id}/input_items",
+ path_template("/v1/responses/{response_id}/input_items", response_id=response_id),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
diff --git a/src/llama_stack_client/resources/responses/responses.py b/src/llama_stack_client/resources/responses/responses.py
index 6ad82e30..070cbb8e 100644
--- a/src/llama_stack_client/resources/responses/responses.py
+++ b/src/llama_stack_client/resources/responses/responses.py
@@ -15,7 +15,7 @@
from ...types import response_list_params, response_create_params
from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
-from ..._utils import required_args, maybe_transform, async_maybe_transform
+from ..._utils import path_template, required_args, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -621,7 +621,7 @@ def retrieve(
if not response_id:
raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
return self._get(
- f"/v1/responses/{response_id}",
+ path_template("/v1/responses/{response_id}", response_id=response_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -711,7 +711,7 @@ def delete(
if not response_id:
raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
return self._delete(
- f"/v1/responses/{response_id}",
+ path_template("/v1/responses/{response_id}", response_id=response_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -1297,7 +1297,7 @@ async def retrieve(
if not response_id:
raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
return await self._get(
- f"/v1/responses/{response_id}",
+ path_template("/v1/responses/{response_id}", response_id=response_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -1387,7 +1387,7 @@ async def delete(
if not response_id:
raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
return await self._delete(
- f"/v1/responses/{response_id}",
+ path_template("/v1/responses/{response_id}", response_id=response_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/llama_stack_client/resources/scoring_functions.py b/src/llama_stack_client/resources/scoring_functions.py
index f63f1631..50aeaebd 100644
--- a/src/llama_stack_client/resources/scoring_functions.py
+++ b/src/llama_stack_client/resources/scoring_functions.py
@@ -15,7 +15,7 @@
from ..types import scoring_function_register_params
from .._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
-from .._utils import maybe_transform, async_maybe_transform
+from .._utils import path_template, maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -80,7 +80,7 @@ def retrieve(
if not scoring_fn_id:
raise ValueError(f"Expected a non-empty value for `scoring_fn_id` but received {scoring_fn_id!r}")
return self._get(
- f"/v1/scoring-functions/{scoring_fn_id}",
+ path_template("/v1/scoring-functions/{scoring_fn_id}", scoring_fn_id=scoring_fn_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -200,7 +200,7 @@ def unregister(
raise ValueError(f"Expected a non-empty value for `scoring_fn_id` but received {scoring_fn_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._delete(
- f"/v1/scoring-functions/{scoring_fn_id}",
+ path_template("/v1/scoring-functions/{scoring_fn_id}", scoring_fn_id=scoring_fn_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -256,7 +256,7 @@ async def retrieve(
if not scoring_fn_id:
raise ValueError(f"Expected a non-empty value for `scoring_fn_id` but received {scoring_fn_id!r}")
return await self._get(
- f"/v1/scoring-functions/{scoring_fn_id}",
+ path_template("/v1/scoring-functions/{scoring_fn_id}", scoring_fn_id=scoring_fn_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -376,7 +376,7 @@ async def unregister(
raise ValueError(f"Expected a non-empty value for `scoring_fn_id` but received {scoring_fn_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._delete(
- f"/v1/scoring-functions/{scoring_fn_id}",
+ path_template("/v1/scoring-functions/{scoring_fn_id}", scoring_fn_id=scoring_fn_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/llama_stack_client/resources/shields.py b/src/llama_stack_client/resources/shields.py
index 9c706a7f..0358c6f0 100644
--- a/src/llama_stack_client/resources/shields.py
+++ b/src/llama_stack_client/resources/shields.py
@@ -15,7 +15,7 @@
from ..types import shield_register_params
from .._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
-from .._utils import maybe_transform, async_maybe_transform
+from .._utils import path_template, maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -80,7 +80,7 @@ def retrieve(
if not identifier:
raise ValueError(f"Expected a non-empty value for `identifier` but received {identifier!r}")
return self._get(
- f"/v1/shields/{identifier}",
+ path_template("/v1/shields/{identifier}", identifier=identifier),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -140,7 +140,7 @@ def delete(
raise ValueError(f"Expected a non-empty value for `identifier` but received {identifier!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._delete(
- f"/v1/shields/{identifier}",
+ path_template("/v1/shields/{identifier}", identifier=identifier),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -248,7 +248,7 @@ async def retrieve(
if not identifier:
raise ValueError(f"Expected a non-empty value for `identifier` but received {identifier!r}")
return await self._get(
- f"/v1/shields/{identifier}",
+ path_template("/v1/shields/{identifier}", identifier=identifier),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -308,7 +308,7 @@ async def delete(
raise ValueError(f"Expected a non-empty value for `identifier` but received {identifier!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._delete(
- f"/v1/shields/{identifier}",
+ path_template("/v1/shields/{identifier}", identifier=identifier),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/llama_stack_client/resources/toolgroups.py b/src/llama_stack_client/resources/toolgroups.py
index 1f399cf7..881ab5d3 100644
--- a/src/llama_stack_client/resources/toolgroups.py
+++ b/src/llama_stack_client/resources/toolgroups.py
@@ -15,7 +15,7 @@
from ..types import toolgroup_register_params
from .._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
-from .._utils import maybe_transform, async_maybe_transform
+from .._utils import path_template, maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -103,7 +103,7 @@ def get(
if not toolgroup_id:
raise ValueError(f"Expected a non-empty value for `toolgroup_id` but received {toolgroup_id!r}")
return self._get(
- f"/v1/toolgroups/{toolgroup_id}",
+ path_template("/v1/toolgroups/{toolgroup_id}", toolgroup_id=toolgroup_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -185,7 +185,7 @@ def unregister(
raise ValueError(f"Expected a non-empty value for `toolgroup_id` but received {toolgroup_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._delete(
- f"/v1/toolgroups/{toolgroup_id}",
+ path_template("/v1/toolgroups/{toolgroup_id}", toolgroup_id=toolgroup_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -264,7 +264,7 @@ async def get(
if not toolgroup_id:
raise ValueError(f"Expected a non-empty value for `toolgroup_id` but received {toolgroup_id!r}")
return await self._get(
- f"/v1/toolgroups/{toolgroup_id}",
+ path_template("/v1/toolgroups/{toolgroup_id}", toolgroup_id=toolgroup_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -346,7 +346,7 @@ async def unregister(
raise ValueError(f"Expected a non-empty value for `toolgroup_id` but received {toolgroup_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._delete(
- f"/v1/toolgroups/{toolgroup_id}",
+ path_template("/v1/toolgroups/{toolgroup_id}", toolgroup_id=toolgroup_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/llama_stack_client/resources/tools.py b/src/llama_stack_client/resources/tools.py
index 00a3be6b..09a4c19f 100644
--- a/src/llama_stack_client/resources/tools.py
+++ b/src/llama_stack_client/resources/tools.py
@@ -15,7 +15,7 @@
from ..types import tool_list_params
from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from .._utils import maybe_transform, async_maybe_transform
+from .._utils import path_template, maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -116,7 +116,7 @@ def get(
if not tool_name:
raise ValueError(f"Expected a non-empty value for `tool_name` but received {tool_name!r}")
return self._get(
- f"/v1/tools/{tool_name}",
+ path_template("/v1/tools/{tool_name}", tool_name=tool_name),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -208,7 +208,7 @@ async def get(
if not tool_name:
raise ValueError(f"Expected a non-empty value for `tool_name` but received {tool_name!r}")
return await self._get(
- f"/v1/tools/{tool_name}",
+ path_template("/v1/tools/{tool_name}", tool_name=tool_name),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
diff --git a/src/llama_stack_client/resources/vector_stores/file_batches.py b/src/llama_stack_client/resources/vector_stores/file_batches.py
index 515d1ab5..e9ea9e65 100644
--- a/src/llama_stack_client/resources/vector_stores/file_batches.py
+++ b/src/llama_stack_client/resources/vector_stores/file_batches.py
@@ -13,7 +13,7 @@
import httpx
from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
-from ..._utils import maybe_transform, async_maybe_transform
+from ..._utils import path_template, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -84,7 +84,7 @@ def create(
if not vector_store_id:
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
return self._post(
- f"/v1/vector_stores/{vector_store_id}/file_batches",
+ path_template("/v1/vector_stores/{vector_store_id}/file_batches", vector_store_id=vector_store_id),
body=maybe_transform(
{
"file_ids": file_ids,
@@ -132,7 +132,11 @@ def retrieve(
if not batch_id:
raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
return self._get(
- f"/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}",
+ path_template(
+ "/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}",
+ vector_store_id=vector_store_id,
+ batch_id=batch_id,
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -172,7 +176,11 @@ def cancel(
if not batch_id:
raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
return self._post(
- f"/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
+ path_template(
+ "/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
+ vector_store_id=vector_store_id,
+ batch_id=batch_id,
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -227,7 +235,11 @@ def list_files(
if not batch_id:
raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
return self._get_api_list(
- f"/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
+ path_template(
+ "/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
+ vector_store_id=vector_store_id,
+ batch_id=batch_id,
+ ),
page=SyncOpenAICursorPage[VectorStoreFile],
options=make_request_options(
extra_headers=extra_headers,
@@ -302,7 +314,7 @@ async def create(
if not vector_store_id:
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
return await self._post(
- f"/v1/vector_stores/{vector_store_id}/file_batches",
+ path_template("/v1/vector_stores/{vector_store_id}/file_batches", vector_store_id=vector_store_id),
body=await async_maybe_transform(
{
"file_ids": file_ids,
@@ -350,7 +362,11 @@ async def retrieve(
if not batch_id:
raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
return await self._get(
- f"/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}",
+ path_template(
+ "/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}",
+ vector_store_id=vector_store_id,
+ batch_id=batch_id,
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -390,7 +406,11 @@ async def cancel(
if not batch_id:
raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
return await self._post(
- f"/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
+ path_template(
+ "/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
+ vector_store_id=vector_store_id,
+ batch_id=batch_id,
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -445,7 +465,11 @@ def list_files(
if not batch_id:
raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
return self._get_api_list(
- f"/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
+ path_template(
+ "/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
+ vector_store_id=vector_store_id,
+ batch_id=batch_id,
+ ),
page=AsyncOpenAICursorPage[VectorStoreFile],
options=make_request_options(
extra_headers=extra_headers,
diff --git a/src/llama_stack_client/resources/vector_stores/files.py b/src/llama_stack_client/resources/vector_stores/files.py
index 45db6860..e7b486a8 100644
--- a/src/llama_stack_client/resources/vector_stores/files.py
+++ b/src/llama_stack_client/resources/vector_stores/files.py
@@ -14,7 +14,7 @@
import httpx
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ..._utils import maybe_transform, async_maybe_transform
+from ..._utils import path_template, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -90,7 +90,7 @@ def create(
if not vector_store_id:
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
return self._post(
- f"/v1/vector_stores/{vector_store_id}/files",
+ path_template("/v1/vector_stores/{vector_store_id}/files", vector_store_id=vector_store_id),
body=maybe_transform(
{
"file_id": file_id,
@@ -138,7 +138,9 @@ def retrieve(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return self._get(
- f"/v1/vector_stores/{vector_store_id}/files/{file_id}",
+ path_template(
+ "/v1/vector_stores/{vector_store_id}/files/{file_id}", vector_store_id=vector_store_id, file_id=file_id
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -181,7 +183,9 @@ def update(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return self._post(
- f"/v1/vector_stores/{vector_store_id}/files/{file_id}",
+ path_template(
+ "/v1/vector_stores/{vector_store_id}/files/{file_id}", vector_store_id=vector_store_id, file_id=file_id
+ ),
body=maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -232,7 +236,7 @@ def list(
if not vector_store_id:
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
return self._get_api_list(
- f"/v1/vector_stores/{vector_store_id}/files",
+ path_template("/v1/vector_stores/{vector_store_id}/files", vector_store_id=vector_store_id),
page=SyncOpenAICursorPage[VectorStoreFile],
options=make_request_options(
extra_headers=extra_headers,
@@ -286,7 +290,9 @@ def delete(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return self._delete(
- f"/v1/vector_stores/{vector_store_id}/files/{file_id}",
+ path_template(
+ "/v1/vector_stores/{vector_store_id}/files/{file_id}", vector_store_id=vector_store_id, file_id=file_id
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -332,7 +338,11 @@ def content(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return self._get(
- f"/v1/vector_stores/{vector_store_id}/files/{file_id}/content",
+ path_template(
+ "/v1/vector_stores/{vector_store_id}/files/{file_id}/content",
+ vector_store_id=vector_store_id,
+ file_id=file_id,
+ ),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -407,7 +417,7 @@ async def create(
if not vector_store_id:
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
return await self._post(
- f"/v1/vector_stores/{vector_store_id}/files",
+ path_template("/v1/vector_stores/{vector_store_id}/files", vector_store_id=vector_store_id),
body=await async_maybe_transform(
{
"file_id": file_id,
@@ -455,7 +465,9 @@ async def retrieve(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return await self._get(
- f"/v1/vector_stores/{vector_store_id}/files/{file_id}",
+ path_template(
+ "/v1/vector_stores/{vector_store_id}/files/{file_id}", vector_store_id=vector_store_id, file_id=file_id
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -498,7 +510,9 @@ async def update(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return await self._post(
- f"/v1/vector_stores/{vector_store_id}/files/{file_id}",
+ path_template(
+ "/v1/vector_stores/{vector_store_id}/files/{file_id}", vector_store_id=vector_store_id, file_id=file_id
+ ),
body=await async_maybe_transform({"attributes": attributes}, file_update_params.FileUpdateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
@@ -549,7 +563,7 @@ def list(
if not vector_store_id:
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
return self._get_api_list(
- f"/v1/vector_stores/{vector_store_id}/files",
+ path_template("/v1/vector_stores/{vector_store_id}/files", vector_store_id=vector_store_id),
page=AsyncOpenAICursorPage[VectorStoreFile],
options=make_request_options(
extra_headers=extra_headers,
@@ -603,7 +617,9 @@ async def delete(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return await self._delete(
- f"/v1/vector_stores/{vector_store_id}/files/{file_id}",
+ path_template(
+ "/v1/vector_stores/{vector_store_id}/files/{file_id}", vector_store_id=vector_store_id, file_id=file_id
+ ),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -649,7 +665,11 @@ async def content(
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return await self._get(
- f"/v1/vector_stores/{vector_store_id}/files/{file_id}/content",
+ path_template(
+ "/v1/vector_stores/{vector_store_id}/files/{file_id}/content",
+ vector_store_id=vector_store_id,
+ file_id=file_id,
+ ),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
diff --git a/src/llama_stack_client/resources/vector_stores/vector_stores.py b/src/llama_stack_client/resources/vector_stores/vector_stores.py
index 6413d1c4..e6b47c7d 100644
--- a/src/llama_stack_client/resources/vector_stores/vector_stores.py
+++ b/src/llama_stack_client/resources/vector_stores/vector_stores.py
@@ -27,7 +27,7 @@
vector_store_update_params,
)
from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
-from ..._utils import maybe_transform, async_maybe_transform
+from ..._utils import path_template, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -156,7 +156,7 @@ def retrieve(
if not vector_store_id:
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
return self._get(
- f"/v1/vector_stores/{vector_store_id}",
+ path_template("/v1/vector_stores/{vector_store_id}", vector_store_id=vector_store_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -200,7 +200,7 @@ def update(
if not vector_store_id:
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
return self._post(
- f"/v1/vector_stores/{vector_store_id}",
+ path_template("/v1/vector_stores/{vector_store_id}", vector_store_id=vector_store_id),
body=maybe_transform(
{
"expires_after": expires_after,
@@ -298,7 +298,7 @@ def delete(
if not vector_store_id:
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
return self._delete(
- f"/v1/vector_stores/{vector_store_id}",
+ path_template("/v1/vector_stores/{vector_store_id}", vector_store_id=vector_store_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -367,7 +367,7 @@ def search(
if not vector_store_id:
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
return self._post(
- f"/v1/vector_stores/{vector_store_id}/search",
+ path_template("/v1/vector_stores/{vector_store_id}/search", vector_store_id=vector_store_id),
body=maybe_transform(
{
"query": query,
@@ -489,7 +489,7 @@ async def retrieve(
if not vector_store_id:
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
return await self._get(
- f"/v1/vector_stores/{vector_store_id}",
+ path_template("/v1/vector_stores/{vector_store_id}", vector_store_id=vector_store_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -533,7 +533,7 @@ async def update(
if not vector_store_id:
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
return await self._post(
- f"/v1/vector_stores/{vector_store_id}",
+ path_template("/v1/vector_stores/{vector_store_id}", vector_store_id=vector_store_id),
body=await async_maybe_transform(
{
"expires_after": expires_after,
@@ -631,7 +631,7 @@ async def delete(
if not vector_store_id:
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
return await self._delete(
- f"/v1/vector_stores/{vector_store_id}",
+ path_template("/v1/vector_stores/{vector_store_id}", vector_store_id=vector_store_id),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
@@ -700,7 +700,7 @@ async def search(
if not vector_store_id:
raise ValueError(f"Expected a non-empty value for `vector_store_id` but received {vector_store_id!r}")
return await self._post(
- f"/v1/vector_stores/{vector_store_id}/search",
+ path_template("/v1/vector_stores/{vector_store_id}/search", vector_store_id=vector_store_id),
body=await async_maybe_transform(
{
"query": query,
diff --git a/src/llama_stack_client/types/file_content_response.py b/src/llama_stack_client/types/file_content_response.py
index c7f72a75..01930307 100644
--- a/src/llama_stack_client/types/file_content_response.py
+++ b/src/llama_stack_client/types/file_content_response.py
@@ -1,3 +1,9 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import TypeAlias
diff --git a/tests/test_utils/test_path.py b/tests/test_utils/test_path.py
new file mode 100644
index 00000000..cd223d37
--- /dev/null
+++ b/tests/test_utils/test_path.py
@@ -0,0 +1,95 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from __future__ import annotations
+
+from typing import Any
+
+import pytest
+
+from llama_stack_client._utils._path import path_template
+
+
+@pytest.mark.parametrize(
+ "template, kwargs, expected",
+ [
+ ("/v1/{id}", dict(id="abc"), "/v1/abc"),
+ ("/v1/{a}/{b}", dict(a="x", b="y"), "/v1/x/y"),
+ ("/v1/{a}{b}/path/{c}?val={d}#{e}", dict(a="x", b="y", c="z", d="u", e="v"), "/v1/xy/path/z?val=u#v"),
+ ("/{w}/{w}", dict(w="echo"), "/echo/echo"),
+ ("/v1/static", {}, "/v1/static"),
+ ("", {}, ""),
+ ("/v1/?q={n}&count=10", dict(n=42), "/v1/?q=42&count=10"),
+ ("/v1/{v}", dict(v=None), "/v1/null"),
+ ("/v1/{v}", dict(v=True), "/v1/true"),
+ ("/v1/{v}", dict(v=False), "/v1/false"),
+ ("/v1/{v}", dict(v=".hidden"), "/v1/.hidden"), # dot prefix ok
+ ("/v1/{v}", dict(v="file.txt"), "/v1/file.txt"), # dot in middle ok
+ ("/v1/{v}", dict(v="..."), "/v1/..."), # triple dot ok
+        ("/v1/{a}{b}", dict(a=".", b="txt"), "/v1/.txt"),  # dot var combined with adjacent var is ok
+ ("/items?q={v}#{f}", dict(v=".", f=".."), "/items?q=.#.."), # dots in query/fragment are fine
+ (
+ "/v1/{a}?query={b}",
+ dict(a="../../other/endpoint", b="a&bad=true"),
+ "/v1/..%2F..%2Fother%2Fendpoint?query=a%26bad%3Dtrue",
+ ),
+ ("/v1/{val}", dict(val="a/b/c"), "/v1/a%2Fb%2Fc"),
+ ("/v1/{val}", dict(val="a/b/c?query=value"), "/v1/a%2Fb%2Fc%3Fquery=value"),
+ ("/v1/{val}", dict(val="a/b/c?query=value&bad=true"), "/v1/a%2Fb%2Fc%3Fquery=value&bad=true"),
+ ("/v1/{val}", dict(val="%20"), "/v1/%2520"), # escapes escape sequences in input
+ # Query: slash and ? are safe, # is not
+ ("/items?q={v}", dict(v="a/b"), "/items?q=a/b"),
+ ("/items?q={v}", dict(v="a?b"), "/items?q=a?b"),
+ ("/items?q={v}", dict(v="a#b"), "/items?q=a%23b"),
+ ("/items?q={v}", dict(v="a b"), "/items?q=a%20b"),
+ # Fragment: slash and ? are safe
+ ("/docs#{v}", dict(v="a/b"), "/docs#a/b"),
+ ("/docs#{v}", dict(v="a?b"), "/docs#a?b"),
+ # Path: slash, ? and # are all encoded
+ ("/v1/{v}", dict(v="a/b"), "/v1/a%2Fb"),
+ ("/v1/{v}", dict(v="a?b"), "/v1/a%3Fb"),
+ ("/v1/{v}", dict(v="a#b"), "/v1/a%23b"),
+ # same var encoded differently by component
+ (
+ "/v1/{v}?q={v}#{v}",
+ dict(v="a/b?c#d"),
+ "/v1/a%2Fb%3Fc%23d?q=a/b?c%23d#a/b?c%23d",
+ ),
+ ("/v1/{val}", dict(val="x?admin=true"), "/v1/x%3Fadmin=true"), # query injection
+ ("/v1/{val}", dict(val="x#admin"), "/v1/x%23admin"), # fragment injection
+ ],
+)
+def test_interpolation(template: str, kwargs: dict[str, Any], expected: str) -> None:
+ assert path_template(template, **kwargs) == expected
+
+
+def test_missing_kwarg_raises_key_error() -> None:
+ with pytest.raises(KeyError, match="org_id"):
+ path_template("/v1/{org_id}")
+
+
+@pytest.mark.parametrize(
+ "template, kwargs",
+ [
+ ("{a}/path", dict(a=".")),
+ ("{a}/path", dict(a="..")),
+ ("/v1/{a}", dict(a=".")),
+ ("/v1/{a}", dict(a="..")),
+ ("/v1/{a}/path", dict(a=".")),
+ ("/v1/{a}/path", dict(a="..")),
+ ("/v1/{a}{b}", dict(a=".", b=".")), # adjacent vars → ".."
+ ("/v1/{a}.", dict(a=".")), # var + static → ".."
+ ("/v1/{a}{b}", dict(a="", b=".")), # empty + dot → "."
+ ("/v1/%2e/{x}", dict(x="ok")), # encoded dot in static text
+ ("/v1/%2e./{x}", dict(x="ok")), # mixed encoded ".." in static
+ ("/v1/.%2E/{x}", dict(x="ok")), # mixed encoded ".." in static
+ ("/v1/{v}?q=1", dict(v="..")),
+ ("/v1/{v}#frag", dict(v="..")),
+ ],
+)
+def test_dot_segment_rejected(template: str, kwargs: dict[str, Any]) -> None:
+ with pytest.raises(ValueError, match="dot-segment"):
+ path_template(template, **kwargs)
From 23d591c70549c7f00b7be136a19893dbdd65f43c Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Sat, 28 Mar 2026 00:05:16 +0000
Subject: [PATCH 13/26] refactor(tests): switch from prism to steady
---
CONTRIBUTING.md | 2 +-
scripts/mock | 4 ++--
scripts/test | 6 +++---
3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 6208781b..36604c01 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -85,7 +85,7 @@ $ pip install ./path-to-wheel-file.whl
## Running tests
-Most tests require you to [set up a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec to run the tests.
+Most tests require you to [set up a mock server](https://github.com/dgellow/steady) against the OpenAPI spec to run the tests.
```sh
$ ./scripts/mock
diff --git a/scripts/mock b/scripts/mock
index d2bddc5f..e1256ee6 100755
--- a/scripts/mock
+++ b/scripts/mock
@@ -26,11 +26,11 @@ echo "==> Modifying SSE schemas for the mock server"
yq -i '(.. | select(has("text/event-stream")).["text/event-stream"].schema) = {"type": "string"}' "$SPEC_PATH"
echo "==> Starting mock server with file ${SPEC_PATH}"
-# Run prism mock on the given spec
+# Run steady mock on the given spec
if [ "$1" == "--daemon" ]; then
npm exec --package=@mockoon/cli@9.3.0 -- mockoon-cli start --data "$SPEC_PATH" --port 4010 &>.mockoon.log &
- # Wait for server to come online (max 30s)
+    # Wait for server startup by watching its log output (max 30s)
echo -n "Waiting for server"
while ! grep -q "Error: \|Server started on port 4010" ".mockoon.log"; do
echo -n "."
diff --git a/scripts/test b/scripts/test
index c7925a63..9532602b 100755
--- a/scripts/test
+++ b/scripts/test
@@ -45,14 +45,14 @@ elif ! prism_is_running; then
echo -e "running against your OpenAPI spec."
echo
echo -e "To run the server, pass in the path or url of your OpenAPI"
- echo -e "spec to the prism command:"
+ echo -e "spec to the steady command:"
echo
- echo -e " \$ ${YELLOW}npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock path/to/your.openapi.yml${NC}"
+ echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.19.3 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-query-object-format=brackets${NC}"
echo
exit 1
else
- echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}"
+ echo -e "${GREEN}✔ Mock steady server is running with your OpenAPI spec${NC}"
echo
fi
From f5ad8f801078d79c03ec7723cd64b1c9895def2d Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Sat, 28 Mar 2026 00:06:20 +0000
Subject: [PATCH 14/26] chore(tests): bump steady to v0.19.4
---
scripts/test | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/scripts/test b/scripts/test
index 9532602b..11569cb7 100755
--- a/scripts/test
+++ b/scripts/test
@@ -47,7 +47,7 @@ elif ! prism_is_running; then
echo -e "To run the server, pass in the path or url of your OpenAPI"
echo -e "spec to the steady command:"
echo
- echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.19.3 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-query-object-format=brackets${NC}"
+ echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.19.4 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets${NC}"
echo
exit 1
From 55689e1ddee55d81efff681dbb3523b0ed09d658 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Sat, 28 Mar 2026 00:09:02 +0000
Subject: [PATCH 15/26] chore(tests): bump steady to v0.19.5
---
scripts/test | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/scripts/test b/scripts/test
index 11569cb7..92bf44f0 100755
--- a/scripts/test
+++ b/scripts/test
@@ -47,7 +47,7 @@ elif ! prism_is_running; then
echo -e "To run the server, pass in the path or url of your OpenAPI"
echo -e "spec to the steady command:"
echo
- echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.19.4 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets${NC}"
+ echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.19.5 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets${NC}"
echo
exit 1
From c0df2dcf9bb38600f73db746dc38d3277e74e7b9 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Sat, 28 Mar 2026 00:11:53 +0000
Subject: [PATCH 16/26] refactor: remove tool_groups from public API and
auto-register from provider specs
---
.stats.yml | 8 +-
README.md | 9 +-
api.md | 41 --
src/llama_stack_client/_client.py | 114 -----
src/llama_stack_client/resources/__init__.py | 42 --
.../resources/tool_runtime.py | 318 ------------
.../resources/toolgroups.py | 458 ------------------
src/llama_stack_client/resources/tools.py | 280 -----------
src/llama_stack_client/types/__init__.py | 11 -
.../types/list_tool_groups_response.py | 18 -
src/llama_stack_client/types/tool_def.py | 29 --
src/llama_stack_client/types/tool_group.py | 40 --
.../types/tool_invocation_result.py | 121 -----
.../types/tool_list_params.py | 18 -
.../types/tool_list_response.py | 16 -
.../types/tool_runtime_invoke_tool_params.py | 22 -
.../types/tool_runtime_list_tools_params.py | 29 --
.../types/tool_runtime_list_tools_response.py | 16 -
.../types/toolgroup_list_response.py | 16 -
.../types/toolgroup_register_params.py | 31 --
tests/api_resources/test_tool_runtime.py | 213 --------
tests/api_resources/test_toolgroups.py | 362 --------------
tests/api_resources/test_tools.py | 194 --------
23 files changed, 9 insertions(+), 2397 deletions(-)
delete mode 100644 src/llama_stack_client/resources/tool_runtime.py
delete mode 100644 src/llama_stack_client/resources/toolgroups.py
delete mode 100644 src/llama_stack_client/resources/tools.py
delete mode 100644 src/llama_stack_client/types/list_tool_groups_response.py
delete mode 100644 src/llama_stack_client/types/tool_def.py
delete mode 100644 src/llama_stack_client/types/tool_group.py
delete mode 100644 src/llama_stack_client/types/tool_invocation_result.py
delete mode 100644 src/llama_stack_client/types/tool_list_params.py
delete mode 100644 src/llama_stack_client/types/tool_list_response.py
delete mode 100644 src/llama_stack_client/types/tool_runtime_invoke_tool_params.py
delete mode 100644 src/llama_stack_client/types/tool_runtime_list_tools_params.py
delete mode 100644 src/llama_stack_client/types/tool_runtime_list_tools_response.py
delete mode 100644 src/llama_stack_client/types/toolgroup_list_response.py
delete mode 100644 src/llama_stack_client/types/toolgroup_register_params.py
delete mode 100644 tests/api_resources/test_tool_runtime.py
delete mode 100644 tests/api_resources/test_toolgroups.py
delete mode 100644 tests/api_resources/test_tools.py
diff --git a/.stats.yml b/.stats.yml
index c3d0a549..5766c12e 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 102
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-630dfd3e6352479efac56ddb8696a89c8e981c97f7d3056d0c163a95427bca3b.yml
-openapi_spec_hash: 17a085582e81bb2e3ec0abebdb065394
-config_hash: d1db834ac019b3bb2e260c9e37dcb5d1
+configured_endpoints: 94
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-96f8902f19a43855a6ff92a7600f0c44fe59b0bd932e425db3d00cf87d0cc87d.yml
+openapi_spec_hash: 77fb0034945bafbb22c11db8128d6e12
+config_hash: 7d5765272a641656f8231509937663a7
diff --git a/README.md b/README.md
index 5209a8ec..a2757391 100644
--- a/README.md
+++ b/README.md
@@ -253,11 +253,12 @@ from llama_stack_client import LlamaStackClient
client = LlamaStackClient()
-client.toolgroups.register(
- provider_id="provider_id",
- toolgroup_id="toolgroup_id",
- mcp_endpoint={"uri": "uri"},
+response_object = client.responses.create(
+ input="string",
+ model="model",
+ prompt={"id": "id"},
)
+print(response_object.prompt)
```
## File uploads
diff --git a/api.md b/api.md
index d7defc35..7d117845 100644
--- a/api.md
+++ b/api.md
@@ -18,47 +18,6 @@ from llama_stack_client.types import (
)
```
-# Toolgroups
-
-Types:
-
-```python
-from llama_stack_client.types import ListToolGroupsResponse, ToolGroup, ToolgroupListResponse
-```
-
-Methods:
-
-- client.toolgroups.list() -> ToolgroupListResponse
-- client.toolgroups.get(toolgroup_id) -> ToolGroup
-- client.toolgroups.register(\*\*params) -> None
-- client.toolgroups.unregister(toolgroup_id) -> None
-
-# Tools
-
-Types:
-
-```python
-from llama_stack_client.types import ToolListResponse
-```
-
-Methods:
-
-- client.tools.list(\*\*params) -> ToolListResponse
-- client.tools.get(tool_name) -> ToolDef
-
-# ToolRuntime
-
-Types:
-
-```python
-from llama_stack_client.types import ToolDef, ToolInvocationResult, ToolRuntimeListToolsResponse
-```
-
-Methods:
-
-- client.tool_runtime.invoke_tool(\*\*params) -> ToolInvocationResult
-- client.tool_runtime.list_tools(\*\*params) -> ToolRuntimeListToolsResponse
-
# Responses
Types:
diff --git a/src/llama_stack_client/_client.py b/src/llama_stack_client/_client.py
index 1ed14dfe..ba967d59 100644
--- a/src/llama_stack_client/_client.py
+++ b/src/llama_stack_client/_client.py
@@ -43,7 +43,6 @@
chat,
alpha,
files,
- tools,
models,
routes,
safety,
@@ -56,16 +55,13 @@
responses,
vector_io,
embeddings,
- toolgroups,
completions,
moderations,
- tool_runtime,
conversations,
vector_stores,
scoring_functions,
)
from .resources.files import FilesResource, AsyncFilesResource
- from .resources.tools import ToolsResource, AsyncToolsResource
from .resources.routes import RoutesResource, AsyncRoutesResource
from .resources.safety import SafetyResource, AsyncSafetyResource
from .resources.batches import BatchesResource, AsyncBatchesResource
@@ -77,11 +73,9 @@
from .resources.providers import ProvidersResource, AsyncProvidersResource
from .resources.vector_io import VectorIoResource, AsyncVectorIoResource
from .resources.embeddings import EmbeddingsResource, AsyncEmbeddingsResource
- from .resources.toolgroups import ToolgroupsResource, AsyncToolgroupsResource
from .resources.alpha.alpha import AlphaResource, AsyncAlphaResource
from .resources.completions import CompletionsResource, AsyncCompletionsResource
from .resources.moderations import ModerationsResource, AsyncModerationsResource
- from .resources.tool_runtime import ToolRuntimeResource, AsyncToolRuntimeResource
from .resources.models.models import ModelsResource, AsyncModelsResource
from .resources.prompts.prompts import PromptsResource, AsyncPromptsResource
from .resources.scoring_functions import ScoringFunctionsResource, AsyncScoringFunctionsResource
@@ -160,24 +154,6 @@ def __init__(
self._default_stream_cls = Stream
- @cached_property
- def toolgroups(self) -> ToolgroupsResource:
- from .resources.toolgroups import ToolgroupsResource
-
- return ToolgroupsResource(self)
-
- @cached_property
- def tools(self) -> ToolsResource:
- from .resources.tools import ToolsResource
-
- return ToolsResource(self)
-
- @cached_property
- def tool_runtime(self) -> ToolRuntimeResource:
- from .resources.tool_runtime import ToolRuntimeResource
-
- return ToolRuntimeResource(self)
-
@cached_property
def responses(self) -> ResponsesResource:
"""APIs for creating and interacting with agentic systems."""
@@ -519,24 +495,6 @@ def __init__(
self._default_stream_cls = AsyncStream
- @cached_property
- def toolgroups(self) -> AsyncToolgroupsResource:
- from .resources.toolgroups import AsyncToolgroupsResource
-
- return AsyncToolgroupsResource(self)
-
- @cached_property
- def tools(self) -> AsyncToolsResource:
- from .resources.tools import AsyncToolsResource
-
- return AsyncToolsResource(self)
-
- @cached_property
- def tool_runtime(self) -> AsyncToolRuntimeResource:
- from .resources.tool_runtime import AsyncToolRuntimeResource
-
- return AsyncToolRuntimeResource(self)
-
@cached_property
def responses(self) -> AsyncResponsesResource:
"""APIs for creating and interacting with agentic systems."""
@@ -825,24 +783,6 @@ class LlamaStackClientWithRawResponse:
def __init__(self, client: LlamaStackClient) -> None:
self._client = client
- @cached_property
- def toolgroups(self) -> toolgroups.ToolgroupsResourceWithRawResponse:
- from .resources.toolgroups import ToolgroupsResourceWithRawResponse
-
- return ToolgroupsResourceWithRawResponse(self._client.toolgroups)
-
- @cached_property
- def tools(self) -> tools.ToolsResourceWithRawResponse:
- from .resources.tools import ToolsResourceWithRawResponse
-
- return ToolsResourceWithRawResponse(self._client.tools)
-
- @cached_property
- def tool_runtime(self) -> tool_runtime.ToolRuntimeResourceWithRawResponse:
- from .resources.tool_runtime import ToolRuntimeResourceWithRawResponse
-
- return ToolRuntimeResourceWithRawResponse(self._client.tool_runtime)
-
@cached_property
def responses(self) -> responses.ResponsesResourceWithRawResponse:
"""APIs for creating and interacting with agentic systems."""
@@ -1017,24 +957,6 @@ class AsyncLlamaStackClientWithRawResponse:
def __init__(self, client: AsyncLlamaStackClient) -> None:
self._client = client
- @cached_property
- def toolgroups(self) -> toolgroups.AsyncToolgroupsResourceWithRawResponse:
- from .resources.toolgroups import AsyncToolgroupsResourceWithRawResponse
-
- return AsyncToolgroupsResourceWithRawResponse(self._client.toolgroups)
-
- @cached_property
- def tools(self) -> tools.AsyncToolsResourceWithRawResponse:
- from .resources.tools import AsyncToolsResourceWithRawResponse
-
- return AsyncToolsResourceWithRawResponse(self._client.tools)
-
- @cached_property
- def tool_runtime(self) -> tool_runtime.AsyncToolRuntimeResourceWithRawResponse:
- from .resources.tool_runtime import AsyncToolRuntimeResourceWithRawResponse
-
- return AsyncToolRuntimeResourceWithRawResponse(self._client.tool_runtime)
-
@cached_property
def responses(self) -> responses.AsyncResponsesResourceWithRawResponse:
"""APIs for creating and interacting with agentic systems."""
@@ -1209,24 +1131,6 @@ class LlamaStackClientWithStreamedResponse:
def __init__(self, client: LlamaStackClient) -> None:
self._client = client
- @cached_property
- def toolgroups(self) -> toolgroups.ToolgroupsResourceWithStreamingResponse:
- from .resources.toolgroups import ToolgroupsResourceWithStreamingResponse
-
- return ToolgroupsResourceWithStreamingResponse(self._client.toolgroups)
-
- @cached_property
- def tools(self) -> tools.ToolsResourceWithStreamingResponse:
- from .resources.tools import ToolsResourceWithStreamingResponse
-
- return ToolsResourceWithStreamingResponse(self._client.tools)
-
- @cached_property
- def tool_runtime(self) -> tool_runtime.ToolRuntimeResourceWithStreamingResponse:
- from .resources.tool_runtime import ToolRuntimeResourceWithStreamingResponse
-
- return ToolRuntimeResourceWithStreamingResponse(self._client.tool_runtime)
-
@cached_property
def responses(self) -> responses.ResponsesResourceWithStreamingResponse:
"""APIs for creating and interacting with agentic systems."""
@@ -1401,24 +1305,6 @@ class AsyncLlamaStackClientWithStreamedResponse:
def __init__(self, client: AsyncLlamaStackClient) -> None:
self._client = client
- @cached_property
- def toolgroups(self) -> toolgroups.AsyncToolgroupsResourceWithStreamingResponse:
- from .resources.toolgroups import AsyncToolgroupsResourceWithStreamingResponse
-
- return AsyncToolgroupsResourceWithStreamingResponse(self._client.toolgroups)
-
- @cached_property
- def tools(self) -> tools.AsyncToolsResourceWithStreamingResponse:
- from .resources.tools import AsyncToolsResourceWithStreamingResponse
-
- return AsyncToolsResourceWithStreamingResponse(self._client.tools)
-
- @cached_property
- def tool_runtime(self) -> tool_runtime.AsyncToolRuntimeResourceWithStreamingResponse:
- from .resources.tool_runtime import AsyncToolRuntimeResourceWithStreamingResponse
-
- return AsyncToolRuntimeResourceWithStreamingResponse(self._client.tool_runtime)
-
@cached_property
def responses(self) -> responses.AsyncResponsesResourceWithStreamingResponse:
"""APIs for creating and interacting with agentic systems."""
diff --git a/src/llama_stack_client/resources/__init__.py b/src/llama_stack_client/resources/__init__.py
index d0ec7ad7..882b3c9d 100644
--- a/src/llama_stack_client/resources/__init__.py
+++ b/src/llama_stack_client/resources/__init__.py
@@ -38,14 +38,6 @@
FilesResourceWithStreamingResponse,
AsyncFilesResourceWithStreamingResponse,
)
-from .tools import (
- ToolsResource,
- AsyncToolsResource,
- ToolsResourceWithRawResponse,
- AsyncToolsResourceWithRawResponse,
- ToolsResourceWithStreamingResponse,
- AsyncToolsResourceWithStreamingResponse,
-)
from .models import (
ModelsResource,
AsyncModelsResource,
@@ -142,14 +134,6 @@
EmbeddingsResourceWithStreamingResponse,
AsyncEmbeddingsResourceWithStreamingResponse,
)
-from .toolgroups import (
- ToolgroupsResource,
- AsyncToolgroupsResource,
- ToolgroupsResourceWithRawResponse,
- AsyncToolgroupsResourceWithRawResponse,
- ToolgroupsResourceWithStreamingResponse,
- AsyncToolgroupsResourceWithStreamingResponse,
-)
from .completions import (
CompletionsResource,
AsyncCompletionsResource,
@@ -166,14 +150,6 @@
ModerationsResourceWithStreamingResponse,
AsyncModerationsResourceWithStreamingResponse,
)
-from .tool_runtime import (
- ToolRuntimeResource,
- AsyncToolRuntimeResource,
- ToolRuntimeResourceWithRawResponse,
- AsyncToolRuntimeResourceWithRawResponse,
- ToolRuntimeResourceWithStreamingResponse,
- AsyncToolRuntimeResourceWithStreamingResponse,
-)
from .conversations import (
ConversationsResource,
AsyncConversationsResource,
@@ -200,24 +176,6 @@
)
__all__ = [
- "ToolgroupsResource",
- "AsyncToolgroupsResource",
- "ToolgroupsResourceWithRawResponse",
- "AsyncToolgroupsResourceWithRawResponse",
- "ToolgroupsResourceWithStreamingResponse",
- "AsyncToolgroupsResourceWithStreamingResponse",
- "ToolsResource",
- "AsyncToolsResource",
- "ToolsResourceWithRawResponse",
- "AsyncToolsResourceWithRawResponse",
- "ToolsResourceWithStreamingResponse",
- "AsyncToolsResourceWithStreamingResponse",
- "ToolRuntimeResource",
- "AsyncToolRuntimeResource",
- "ToolRuntimeResourceWithRawResponse",
- "AsyncToolRuntimeResourceWithRawResponse",
- "ToolRuntimeResourceWithStreamingResponse",
- "AsyncToolRuntimeResourceWithStreamingResponse",
"ResponsesResource",
"AsyncResponsesResource",
"ResponsesResourceWithRawResponse",
diff --git a/src/llama_stack_client/resources/tool_runtime.py b/src/llama_stack_client/resources/tool_runtime.py
deleted file mode 100644
index 9f78ed23..00000000
--- a/src/llama_stack_client/resources/tool_runtime.py
+++ /dev/null
@@ -1,318 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import typing_extensions
-from typing import Dict, Type, Optional, cast
-
-import httpx
-
-from ..types import tool_runtime_list_tools_params, tool_runtime_invoke_tool_params
-from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from .._utils import maybe_transform, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._wrappers import DataWrapper
-from .._base_client import make_request_options
-from ..types.tool_invocation_result import ToolInvocationResult
-from ..types.tool_runtime_list_tools_response import ToolRuntimeListToolsResponse
-
-__all__ = ["ToolRuntimeResource", "AsyncToolRuntimeResource"]
-
-
-class ToolRuntimeResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> ToolRuntimeResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return ToolRuntimeResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ToolRuntimeResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return ToolRuntimeResourceWithStreamingResponse(self)
-
- @typing_extensions.deprecated("deprecated")
- def invoke_tool(
- self,
- *,
- kwargs: Dict[str, object],
- tool_name: str,
- authorization: Optional[str] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ToolInvocationResult:
- """
- Run a tool with the given arguments.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/v1/tool-runtime/invoke",
- body=maybe_transform(
- {
- "kwargs": kwargs,
- "tool_name": tool_name,
- "authorization": authorization,
- },
- tool_runtime_invoke_tool_params.ToolRuntimeInvokeToolParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ToolInvocationResult,
- )
-
- @typing_extensions.deprecated("deprecated")
- def list_tools(
- self,
- *,
- authorization: Optional[str] | Omit = omit,
- mcp_endpoint: Optional[tool_runtime_list_tools_params.McpEndpoint] | Omit = omit,
- tool_group_id: Optional[str] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ToolRuntimeListToolsResponse:
- """
- List all tools in the runtime.
-
- Args:
- mcp_endpoint: A URL reference to external content.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/v1/tool-runtime/list-tools",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "authorization": authorization,
- "mcp_endpoint": mcp_endpoint,
- "tool_group_id": tool_group_id,
- },
- tool_runtime_list_tools_params.ToolRuntimeListToolsParams,
- ),
- post_parser=DataWrapper[ToolRuntimeListToolsResponse]._unwrapper,
- ),
- cast_to=cast(Type[ToolRuntimeListToolsResponse], DataWrapper[ToolRuntimeListToolsResponse]),
- )
-
-
-class AsyncToolRuntimeResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncToolRuntimeResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return AsyncToolRuntimeResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncToolRuntimeResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return AsyncToolRuntimeResourceWithStreamingResponse(self)
-
- @typing_extensions.deprecated("deprecated")
- async def invoke_tool(
- self,
- *,
- kwargs: Dict[str, object],
- tool_name: str,
- authorization: Optional[str] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ToolInvocationResult:
- """
- Run a tool with the given arguments.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/v1/tool-runtime/invoke",
- body=await async_maybe_transform(
- {
- "kwargs": kwargs,
- "tool_name": tool_name,
- "authorization": authorization,
- },
- tool_runtime_invoke_tool_params.ToolRuntimeInvokeToolParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ToolInvocationResult,
- )
-
- @typing_extensions.deprecated("deprecated")
- async def list_tools(
- self,
- *,
- authorization: Optional[str] | Omit = omit,
- mcp_endpoint: Optional[tool_runtime_list_tools_params.McpEndpoint] | Omit = omit,
- tool_group_id: Optional[str] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ToolRuntimeListToolsResponse:
- """
- List all tools in the runtime.
-
- Args:
- mcp_endpoint: A URL reference to external content.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/v1/tool-runtime/list-tools",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "authorization": authorization,
- "mcp_endpoint": mcp_endpoint,
- "tool_group_id": tool_group_id,
- },
- tool_runtime_list_tools_params.ToolRuntimeListToolsParams,
- ),
- post_parser=DataWrapper[ToolRuntimeListToolsResponse]._unwrapper,
- ),
- cast_to=cast(Type[ToolRuntimeListToolsResponse], DataWrapper[ToolRuntimeListToolsResponse]),
- )
-
-
-class ToolRuntimeResourceWithRawResponse:
- def __init__(self, tool_runtime: ToolRuntimeResource) -> None:
- self._tool_runtime = tool_runtime
-
- self.invoke_tool = ( # pyright: ignore[reportDeprecated]
- to_raw_response_wrapper(
- tool_runtime.invoke_tool, # pyright: ignore[reportDeprecated],
- )
- )
- self.list_tools = ( # pyright: ignore[reportDeprecated]
- to_raw_response_wrapper(
- tool_runtime.list_tools, # pyright: ignore[reportDeprecated],
- )
- )
-
-
-class AsyncToolRuntimeResourceWithRawResponse:
- def __init__(self, tool_runtime: AsyncToolRuntimeResource) -> None:
- self._tool_runtime = tool_runtime
-
- self.invoke_tool = ( # pyright: ignore[reportDeprecated]
- async_to_raw_response_wrapper(
- tool_runtime.invoke_tool, # pyright: ignore[reportDeprecated],
- )
- )
- self.list_tools = ( # pyright: ignore[reportDeprecated]
- async_to_raw_response_wrapper(
- tool_runtime.list_tools, # pyright: ignore[reportDeprecated],
- )
- )
-
-
-class ToolRuntimeResourceWithStreamingResponse:
- def __init__(self, tool_runtime: ToolRuntimeResource) -> None:
- self._tool_runtime = tool_runtime
-
- self.invoke_tool = ( # pyright: ignore[reportDeprecated]
- to_streamed_response_wrapper(
- tool_runtime.invoke_tool, # pyright: ignore[reportDeprecated],
- )
- )
- self.list_tools = ( # pyright: ignore[reportDeprecated]
- to_streamed_response_wrapper(
- tool_runtime.list_tools, # pyright: ignore[reportDeprecated],
- )
- )
-
-
-class AsyncToolRuntimeResourceWithStreamingResponse:
- def __init__(self, tool_runtime: AsyncToolRuntimeResource) -> None:
- self._tool_runtime = tool_runtime
-
- self.invoke_tool = ( # pyright: ignore[reportDeprecated]
- async_to_streamed_response_wrapper(
- tool_runtime.invoke_tool, # pyright: ignore[reportDeprecated],
- )
- )
- self.list_tools = ( # pyright: ignore[reportDeprecated]
- async_to_streamed_response_wrapper(
- tool_runtime.list_tools, # pyright: ignore[reportDeprecated],
- )
- )
diff --git a/src/llama_stack_client/resources/toolgroups.py b/src/llama_stack_client/resources/toolgroups.py
deleted file mode 100644
index 881ab5d3..00000000
--- a/src/llama_stack_client/resources/toolgroups.py
+++ /dev/null
@@ -1,458 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import typing_extensions
-from typing import Dict, Type, Optional, cast
-
-import httpx
-
-from ..types import toolgroup_register_params
-from .._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
-from .._utils import path_template, maybe_transform, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._wrappers import DataWrapper
-from .._base_client import make_request_options
-from ..types.tool_group import ToolGroup
-from ..types.toolgroup_list_response import ToolgroupListResponse
-
-__all__ = ["ToolgroupsResource", "AsyncToolgroupsResource"]
-
-
-class ToolgroupsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> ToolgroupsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return ToolgroupsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ToolgroupsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return ToolgroupsResourceWithStreamingResponse(self)
-
- @typing_extensions.deprecated("deprecated")
- def list(
- self,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ToolgroupListResponse:
- """List tool groups with optional provider."""
- return self._get(
- "/v1/toolgroups",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[ToolgroupListResponse]._unwrapper,
- ),
- cast_to=cast(Type[ToolgroupListResponse], DataWrapper[ToolgroupListResponse]),
- )
-
- @typing_extensions.deprecated("deprecated")
- def get(
- self,
- toolgroup_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ToolGroup:
- """
- Get a tool group by its ID.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not toolgroup_id:
- raise ValueError(f"Expected a non-empty value for `toolgroup_id` but received {toolgroup_id!r}")
- return self._get(
- path_template("/v1/toolgroups/{toolgroup_id}", toolgroup_id=toolgroup_id),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ToolGroup,
- )
-
- @typing_extensions.deprecated("deprecated")
- def register(
- self,
- *,
- provider_id: str,
- toolgroup_id: str,
- args: Optional[Dict[str, object]] | Omit = omit,
- mcp_endpoint: Optional[toolgroup_register_params.McpEndpoint] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Register a tool group.
-
- Args:
- mcp_endpoint: A URL reference to external content.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return self._post(
- "/v1/toolgroups",
- body=maybe_transform(
- {
- "provider_id": provider_id,
- "toolgroup_id": toolgroup_id,
- "args": args,
- "mcp_endpoint": mcp_endpoint,
- },
- toolgroup_register_params.ToolgroupRegisterParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
- @typing_extensions.deprecated("deprecated")
- def unregister(
- self,
- toolgroup_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Unregister a tool group.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not toolgroup_id:
- raise ValueError(f"Expected a non-empty value for `toolgroup_id` but received {toolgroup_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return self._delete(
- path_template("/v1/toolgroups/{toolgroup_id}", toolgroup_id=toolgroup_id),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
-
-class AsyncToolgroupsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncToolgroupsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return AsyncToolgroupsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncToolgroupsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return AsyncToolgroupsResourceWithStreamingResponse(self)
-
- @typing_extensions.deprecated("deprecated")
- async def list(
- self,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ToolgroupListResponse:
- """List tool groups with optional provider."""
- return await self._get(
- "/v1/toolgroups",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[ToolgroupListResponse]._unwrapper,
- ),
- cast_to=cast(Type[ToolgroupListResponse], DataWrapper[ToolgroupListResponse]),
- )
-
- @typing_extensions.deprecated("deprecated")
- async def get(
- self,
- toolgroup_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ToolGroup:
- """
- Get a tool group by its ID.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not toolgroup_id:
- raise ValueError(f"Expected a non-empty value for `toolgroup_id` but received {toolgroup_id!r}")
- return await self._get(
- path_template("/v1/toolgroups/{toolgroup_id}", toolgroup_id=toolgroup_id),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ToolGroup,
- )
-
- @typing_extensions.deprecated("deprecated")
- async def register(
- self,
- *,
- provider_id: str,
- toolgroup_id: str,
- args: Optional[Dict[str, object]] | Omit = omit,
- mcp_endpoint: Optional[toolgroup_register_params.McpEndpoint] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Register a tool group.
-
- Args:
- mcp_endpoint: A URL reference to external content.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return await self._post(
- "/v1/toolgroups",
- body=await async_maybe_transform(
- {
- "provider_id": provider_id,
- "toolgroup_id": toolgroup_id,
- "args": args,
- "mcp_endpoint": mcp_endpoint,
- },
- toolgroup_register_params.ToolgroupRegisterParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
- @typing_extensions.deprecated("deprecated")
- async def unregister(
- self,
- toolgroup_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Unregister a tool group.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not toolgroup_id:
- raise ValueError(f"Expected a non-empty value for `toolgroup_id` but received {toolgroup_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return await self._delete(
- path_template("/v1/toolgroups/{toolgroup_id}", toolgroup_id=toolgroup_id),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
-
-class ToolgroupsResourceWithRawResponse:
- def __init__(self, toolgroups: ToolgroupsResource) -> None:
- self._toolgroups = toolgroups
-
- self.list = ( # pyright: ignore[reportDeprecated]
- to_raw_response_wrapper(
- toolgroups.list, # pyright: ignore[reportDeprecated],
- )
- )
- self.get = ( # pyright: ignore[reportDeprecated]
- to_raw_response_wrapper(
- toolgroups.get, # pyright: ignore[reportDeprecated],
- )
- )
- self.register = ( # pyright: ignore[reportDeprecated]
- to_raw_response_wrapper(
- toolgroups.register, # pyright: ignore[reportDeprecated],
- )
- )
- self.unregister = ( # pyright: ignore[reportDeprecated]
- to_raw_response_wrapper(
- toolgroups.unregister, # pyright: ignore[reportDeprecated],
- )
- )
-
-
-class AsyncToolgroupsResourceWithRawResponse:
- def __init__(self, toolgroups: AsyncToolgroupsResource) -> None:
- self._toolgroups = toolgroups
-
- self.list = ( # pyright: ignore[reportDeprecated]
- async_to_raw_response_wrapper(
- toolgroups.list, # pyright: ignore[reportDeprecated],
- )
- )
- self.get = ( # pyright: ignore[reportDeprecated]
- async_to_raw_response_wrapper(
- toolgroups.get, # pyright: ignore[reportDeprecated],
- )
- )
- self.register = ( # pyright: ignore[reportDeprecated]
- async_to_raw_response_wrapper(
- toolgroups.register, # pyright: ignore[reportDeprecated],
- )
- )
- self.unregister = ( # pyright: ignore[reportDeprecated]
- async_to_raw_response_wrapper(
- toolgroups.unregister, # pyright: ignore[reportDeprecated],
- )
- )
-
-
-class ToolgroupsResourceWithStreamingResponse:
- def __init__(self, toolgroups: ToolgroupsResource) -> None:
- self._toolgroups = toolgroups
-
- self.list = ( # pyright: ignore[reportDeprecated]
- to_streamed_response_wrapper(
- toolgroups.list, # pyright: ignore[reportDeprecated],
- )
- )
- self.get = ( # pyright: ignore[reportDeprecated]
- to_streamed_response_wrapper(
- toolgroups.get, # pyright: ignore[reportDeprecated],
- )
- )
- self.register = ( # pyright: ignore[reportDeprecated]
- to_streamed_response_wrapper(
- toolgroups.register, # pyright: ignore[reportDeprecated],
- )
- )
- self.unregister = ( # pyright: ignore[reportDeprecated]
- to_streamed_response_wrapper(
- toolgroups.unregister, # pyright: ignore[reportDeprecated],
- )
- )
-
-
-class AsyncToolgroupsResourceWithStreamingResponse:
- def __init__(self, toolgroups: AsyncToolgroupsResource) -> None:
- self._toolgroups = toolgroups
-
- self.list = ( # pyright: ignore[reportDeprecated]
- async_to_streamed_response_wrapper(
- toolgroups.list, # pyright: ignore[reportDeprecated],
- )
- )
- self.get = ( # pyright: ignore[reportDeprecated]
- async_to_streamed_response_wrapper(
- toolgroups.get, # pyright: ignore[reportDeprecated],
- )
- )
- self.register = ( # pyright: ignore[reportDeprecated]
- async_to_streamed_response_wrapper(
- toolgroups.register, # pyright: ignore[reportDeprecated],
- )
- )
- self.unregister = ( # pyright: ignore[reportDeprecated]
- async_to_streamed_response_wrapper(
- toolgroups.unregister, # pyright: ignore[reportDeprecated],
- )
- )
diff --git a/src/llama_stack_client/resources/tools.py b/src/llama_stack_client/resources/tools.py
deleted file mode 100644
index 09a4c19f..00000000
--- a/src/llama_stack_client/resources/tools.py
+++ /dev/null
@@ -1,280 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import typing_extensions
-from typing import Type, Optional, cast
-
-import httpx
-
-from ..types import tool_list_params
-from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from .._utils import path_template, maybe_transform, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._wrappers import DataWrapper
-from .._base_client import make_request_options
-from ..types.tool_def import ToolDef
-from ..types.tool_list_response import ToolListResponse
-
-__all__ = ["ToolsResource", "AsyncToolsResource"]
-
-
-class ToolsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> ToolsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return ToolsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ToolsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return ToolsResourceWithStreamingResponse(self)
-
- @typing_extensions.deprecated("deprecated")
- def list(
- self,
- *,
- toolgroup_id: Optional[str] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ToolListResponse:
- """
- List tools with optional tool group.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/v1/tools",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform({"toolgroup_id": toolgroup_id}, tool_list_params.ToolListParams),
- post_parser=DataWrapper[ToolListResponse]._unwrapper,
- ),
- cast_to=cast(Type[ToolListResponse], DataWrapper[ToolListResponse]),
- )
-
- @typing_extensions.deprecated("deprecated")
- def get(
- self,
- tool_name: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ToolDef:
- """
- Get a tool by its name.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not tool_name:
- raise ValueError(f"Expected a non-empty value for `tool_name` but received {tool_name!r}")
- return self._get(
- path_template("/v1/tools/{tool_name}", tool_name=tool_name),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ToolDef,
- )
-
-
-class AsyncToolsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncToolsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return AsyncToolsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncToolsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return AsyncToolsResourceWithStreamingResponse(self)
-
- @typing_extensions.deprecated("deprecated")
- async def list(
- self,
- *,
- toolgroup_id: Optional[str] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ToolListResponse:
- """
- List tools with optional tool group.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/v1/tools",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform({"toolgroup_id": toolgroup_id}, tool_list_params.ToolListParams),
- post_parser=DataWrapper[ToolListResponse]._unwrapper,
- ),
- cast_to=cast(Type[ToolListResponse], DataWrapper[ToolListResponse]),
- )
-
- @typing_extensions.deprecated("deprecated")
- async def get(
- self,
- tool_name: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ToolDef:
- """
- Get a tool by its name.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not tool_name:
- raise ValueError(f"Expected a non-empty value for `tool_name` but received {tool_name!r}")
- return await self._get(
- path_template("/v1/tools/{tool_name}", tool_name=tool_name),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ToolDef,
- )
-
-
-class ToolsResourceWithRawResponse:
- def __init__(self, tools: ToolsResource) -> None:
- self._tools = tools
-
- self.list = ( # pyright: ignore[reportDeprecated]
- to_raw_response_wrapper(
- tools.list, # pyright: ignore[reportDeprecated],
- )
- )
- self.get = ( # pyright: ignore[reportDeprecated]
- to_raw_response_wrapper(
- tools.get, # pyright: ignore[reportDeprecated],
- )
- )
-
-
-class AsyncToolsResourceWithRawResponse:
- def __init__(self, tools: AsyncToolsResource) -> None:
- self._tools = tools
-
- self.list = ( # pyright: ignore[reportDeprecated]
- async_to_raw_response_wrapper(
- tools.list, # pyright: ignore[reportDeprecated],
- )
- )
- self.get = ( # pyright: ignore[reportDeprecated]
- async_to_raw_response_wrapper(
- tools.get, # pyright: ignore[reportDeprecated],
- )
- )
-
-
-class ToolsResourceWithStreamingResponse:
- def __init__(self, tools: ToolsResource) -> None:
- self._tools = tools
-
- self.list = ( # pyright: ignore[reportDeprecated]
- to_streamed_response_wrapper(
- tools.list, # pyright: ignore[reportDeprecated],
- )
- )
- self.get = ( # pyright: ignore[reportDeprecated]
- to_streamed_response_wrapper(
- tools.get, # pyright: ignore[reportDeprecated],
- )
- )
-
-
-class AsyncToolsResourceWithStreamingResponse:
- def __init__(self, tools: AsyncToolsResource) -> None:
- self._tools = tools
-
- self.list = ( # pyright: ignore[reportDeprecated]
- async_to_streamed_response_wrapper(
- tools.list, # pyright: ignore[reportDeprecated],
- )
- )
- self.get = ( # pyright: ignore[reportDeprecated]
- async_to_streamed_response_wrapper(
- tools.get, # pyright: ignore[reportDeprecated],
- )
- )
diff --git a/src/llama_stack_client/types/__init__.py b/src/llama_stack_client/types/__init__.py
index 7fee4b11..b40e8110 100644
--- a/src/llama_stack_client/types/__init__.py
+++ b/src/llama_stack_client/types/__init__.py
@@ -27,18 +27,14 @@
InterleavedContentItem as InterleavedContentItem,
)
from .shield import Shield as Shield
-from .tool_def import ToolDef as ToolDef
from .scoring_fn import ScoringFn as ScoringFn
-from .tool_group import ToolGroup as ToolGroup
from .vector_store import VectorStore as VectorStore
from .create_response import CreateResponse as CreateResponse
from .response_object import ResponseObject as ResponseObject
from .file_list_params import FileListParams as FileListParams
-from .tool_list_params import ToolListParams as ToolListParams
from .batch_list_params import BatchListParams as BatchListParams
from .route_list_params import RouteListParams as RouteListParams
from .file_create_params import FileCreateParams as FileCreateParams
-from .tool_list_response import ToolListResponse as ToolListResponse
from .batch_create_params import BatchCreateParams as BatchCreateParams
from .batch_list_response import BatchListResponse as BatchListResponse
from .conversation_object import ConversationObject as ConversationObject
@@ -69,21 +65,17 @@
from .response_object_stream import ResponseObjectStream as ResponseObjectStream
from .scoring_score_response import ScoringScoreResponse as ScoringScoreResponse
from .shield_register_params import ShieldRegisterParams as ShieldRegisterParams
-from .tool_invocation_result import ToolInvocationResult as ToolInvocationResult
from .vector_io_query_params import VectorIoQueryParams as VectorIoQueryParams
from .batch_retrieve_response import BatchRetrieveResponse as BatchRetrieveResponse
from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
from .model_register_response import ModelRegisterResponse as ModelRegisterResponse
from .model_retrieve_response import ModelRetrieveResponse as ModelRetrieveResponse
-from .toolgroup_list_response import ToolgroupListResponse as ToolgroupListResponse
from .vector_io_insert_params import VectorIoInsertParams as VectorIoInsertParams
from .completion_create_params import CompletionCreateParams as CompletionCreateParams
from .moderation_create_params import ModerationCreateParams as ModerationCreateParams
from .response_delete_response import ResponseDeleteResponse as ResponseDeleteResponse
from .safety_run_shield_params import SafetyRunShieldParams as SafetyRunShieldParams
from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams
-from .list_tool_groups_response import ListToolGroupsResponse as ListToolGroupsResponse
-from .toolgroup_register_params import ToolgroupRegisterParams as ToolgroupRegisterParams
from .completion_create_response import CompletionCreateResponse as CompletionCreateResponse
from .conversation_create_params import ConversationCreateParams as ConversationCreateParams
from .conversation_update_params import ConversationUpdateParams as ConversationUpdateParams
@@ -98,9 +90,6 @@
from .vector_store_delete_response import VectorStoreDeleteResponse as VectorStoreDeleteResponse
from .vector_store_search_response import VectorStoreSearchResponse as VectorStoreSearchResponse
from .scoring_function_list_response import ScoringFunctionListResponse as ScoringFunctionListResponse
-from .tool_runtime_list_tools_params import ToolRuntimeListToolsParams as ToolRuntimeListToolsParams
from .list_scoring_functions_response import ListScoringFunctionsResponse as ListScoringFunctionsResponse
-from .tool_runtime_invoke_tool_params import ToolRuntimeInvokeToolParams as ToolRuntimeInvokeToolParams
from .scoring_function_register_params import ScoringFunctionRegisterParams as ScoringFunctionRegisterParams
-from .tool_runtime_list_tools_response import ToolRuntimeListToolsResponse as ToolRuntimeListToolsResponse
from .prompt_set_default_version_params import PromptSetDefaultVersionParams as PromptSetDefaultVersionParams
diff --git a/src/llama_stack_client/types/list_tool_groups_response.py b/src/llama_stack_client/types/list_tool_groups_response.py
deleted file mode 100644
index 1484d78d..00000000
--- a/src/llama_stack_client/types/list_tool_groups_response.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .._models import BaseModel
-from .toolgroup_list_response import ToolgroupListResponse
-
-__all__ = ["ListToolGroupsResponse"]
-
-
-class ListToolGroupsResponse(BaseModel):
- """Response containing a list of tool groups."""
-
- data: ToolgroupListResponse
diff --git a/src/llama_stack_client/types/tool_def.py b/src/llama_stack_client/types/tool_def.py
deleted file mode 100644
index 1866f3ac..00000000
--- a/src/llama_stack_client/types/tool_def.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, Optional
-
-from .._models import BaseModel
-
-__all__ = ["ToolDef"]
-
-
-class ToolDef(BaseModel):
- """Tool definition used in runtime contexts."""
-
- name: str
-
- description: Optional[str] = None
-
- input_schema: Optional[Dict[str, object]] = None
-
- metadata: Optional[Dict[str, object]] = None
-
- output_schema: Optional[Dict[str, object]] = None
-
- toolgroup_id: Optional[str] = None
diff --git a/src/llama_stack_client/types/tool_group.py b/src/llama_stack_client/types/tool_group.py
deleted file mode 100644
index 70f1f91c..00000000
--- a/src/llama_stack_client/types/tool_group.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["ToolGroup", "McpEndpoint"]
-
-
-class McpEndpoint(BaseModel):
- """A URL reference to external content."""
-
- uri: str
-
-
-class ToolGroup(BaseModel):
- """A group of related tools managed together."""
-
- identifier: str
- """Unique identifier for this resource in llama stack"""
-
- provider_id: str
- """ID of the provider that owns this resource"""
-
- args: Optional[Dict[str, object]] = None
-
- mcp_endpoint: Optional[McpEndpoint] = None
- """A URL reference to external content."""
-
- provider_resource_id: Optional[str] = None
- """Unique identifier for this resource in the provider"""
-
- type: Optional[Literal["tool_group"]] = None
diff --git a/src/llama_stack_client/types/tool_invocation_result.py b/src/llama_stack_client/types/tool_invocation_result.py
deleted file mode 100644
index e97f9d7f..00000000
--- a/src/llama_stack_client/types/tool_invocation_result.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal, Annotated, TypeAlias
-
-from .._utils import PropertyInfo
-from .._models import BaseModel
-
-__all__ = [
- "ToolInvocationResult",
- "Content",
- "ContentImageContentItemOutput",
- "ContentImageContentItemOutputImage",
- "ContentImageContentItemOutputImageURL",
- "ContentTextContentItem",
- "ContentListImageContentItemOutputTextContentItem",
- "ContentListImageContentItemOutputTextContentItemImageContentItemOutput",
- "ContentListImageContentItemOutputTextContentItemImageContentItemOutputImage",
- "ContentListImageContentItemOutputTextContentItemImageContentItemOutputImageURL",
- "ContentListImageContentItemOutputTextContentItemTextContentItem",
-]
-
-
-class ContentImageContentItemOutputImageURL(BaseModel):
- """A URL reference to external content."""
-
- uri: str
-
-
-class ContentImageContentItemOutputImage(BaseModel):
- """A URL or a base64 encoded string"""
-
- data: Optional[str] = None
-
- url: Optional[ContentImageContentItemOutputImageURL] = None
- """A URL reference to external content."""
-
-
-class ContentImageContentItemOutput(BaseModel):
- """A image content item"""
-
- image: ContentImageContentItemOutputImage
- """A URL or a base64 encoded string"""
-
- type: Optional[Literal["image"]] = None
-
-
-class ContentTextContentItem(BaseModel):
- """A text content item"""
-
- text: str
-
- type: Optional[Literal["text"]] = None
-
-
-class ContentListImageContentItemOutputTextContentItemImageContentItemOutputImageURL(BaseModel):
- """A URL reference to external content."""
-
- uri: str
-
-
-class ContentListImageContentItemOutputTextContentItemImageContentItemOutputImage(BaseModel):
- """A URL or a base64 encoded string"""
-
- data: Optional[str] = None
-
- url: Optional[ContentListImageContentItemOutputTextContentItemImageContentItemOutputImageURL] = None
- """A URL reference to external content."""
-
-
-class ContentListImageContentItemOutputTextContentItemImageContentItemOutput(BaseModel):
- """A image content item"""
-
- image: ContentListImageContentItemOutputTextContentItemImageContentItemOutputImage
- """A URL or a base64 encoded string"""
-
- type: Optional[Literal["image"]] = None
-
-
-class ContentListImageContentItemOutputTextContentItemTextContentItem(BaseModel):
- """A text content item"""
-
- text: str
-
- type: Optional[Literal["text"]] = None
-
-
-ContentListImageContentItemOutputTextContentItem: TypeAlias = Annotated[
- Union[
- ContentListImageContentItemOutputTextContentItemImageContentItemOutput,
- ContentListImageContentItemOutputTextContentItemTextContentItem,
- ],
- PropertyInfo(discriminator="type"),
-]
-
-Content: TypeAlias = Union[
- str,
- ContentImageContentItemOutput,
- ContentTextContentItem,
- List[ContentListImageContentItemOutputTextContentItem],
- None,
-]
-
-
-class ToolInvocationResult(BaseModel):
- """Result of a tool invocation."""
-
- content: Optional[Content] = None
- """A image content item"""
-
- error_code: Optional[int] = None
-
- error_message: Optional[str] = None
-
- metadata: Optional[Dict[str, object]] = None
diff --git a/src/llama_stack_client/types/tool_list_params.py b/src/llama_stack_client/types/tool_list_params.py
deleted file mode 100644
index acd34c77..00000000
--- a/src/llama_stack_client/types/tool_list_params.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Optional
-from typing_extensions import TypedDict
-
-__all__ = ["ToolListParams"]
-
-
-class ToolListParams(TypedDict, total=False):
- toolgroup_id: Optional[str]
diff --git a/src/llama_stack_client/types/tool_list_response.py b/src/llama_stack_client/types/tool_list_response.py
deleted file mode 100644
index 240ed3b7..00000000
--- a/src/llama_stack_client/types/tool_list_response.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import TypeAlias
-
-from .tool_def import ToolDef
-
-__all__ = ["ToolListResponse"]
-
-ToolListResponse: TypeAlias = List[ToolDef]
diff --git a/src/llama_stack_client/types/tool_runtime_invoke_tool_params.py b/src/llama_stack_client/types/tool_runtime_invoke_tool_params.py
deleted file mode 100644
index 3e1027eb..00000000
--- a/src/llama_stack_client/types/tool_runtime_invoke_tool_params.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Optional
-from typing_extensions import Required, TypedDict
-
-__all__ = ["ToolRuntimeInvokeToolParams"]
-
-
-class ToolRuntimeInvokeToolParams(TypedDict, total=False):
- kwargs: Required[Dict[str, object]]
-
- tool_name: Required[str]
-
- authorization: Optional[str]
diff --git a/src/llama_stack_client/types/tool_runtime_list_tools_params.py b/src/llama_stack_client/types/tool_runtime_list_tools_params.py
deleted file mode 100644
index d2ee9188..00000000
--- a/src/llama_stack_client/types/tool_runtime_list_tools_params.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Optional
-from typing_extensions import Required, TypedDict
-
-__all__ = ["ToolRuntimeListToolsParams", "McpEndpoint"]
-
-
-class ToolRuntimeListToolsParams(TypedDict, total=False):
- authorization: Optional[str]
-
- mcp_endpoint: Optional[McpEndpoint]
- """A URL reference to external content."""
-
- tool_group_id: Optional[str]
-
-
-class McpEndpoint(TypedDict, total=False):
- """A URL reference to external content."""
-
- uri: Required[str]
diff --git a/src/llama_stack_client/types/tool_runtime_list_tools_response.py b/src/llama_stack_client/types/tool_runtime_list_tools_response.py
deleted file mode 100644
index 6854cb06..00000000
--- a/src/llama_stack_client/types/tool_runtime_list_tools_response.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import TypeAlias
-
-from .tool_def import ToolDef
-
-__all__ = ["ToolRuntimeListToolsResponse"]
-
-ToolRuntimeListToolsResponse: TypeAlias = List[ToolDef]
diff --git a/src/llama_stack_client/types/toolgroup_list_response.py b/src/llama_stack_client/types/toolgroup_list_response.py
deleted file mode 100644
index b468e07d..00000000
--- a/src/llama_stack_client/types/toolgroup_list_response.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import TypeAlias
-
-from .tool_group import ToolGroup
-
-__all__ = ["ToolgroupListResponse"]
-
-ToolgroupListResponse: TypeAlias = List[ToolGroup]
diff --git a/src/llama_stack_client/types/toolgroup_register_params.py b/src/llama_stack_client/types/toolgroup_register_params.py
deleted file mode 100644
index 6754c80f..00000000
--- a/src/llama_stack_client/types/toolgroup_register_params.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Optional
-from typing_extensions import Required, TypedDict
-
-__all__ = ["ToolgroupRegisterParams", "McpEndpoint"]
-
-
-class ToolgroupRegisterParams(TypedDict, total=False):
- provider_id: Required[str]
-
- toolgroup_id: Required[str]
-
- args: Optional[Dict[str, object]]
-
- mcp_endpoint: Optional[McpEndpoint]
- """A URL reference to external content."""
-
-
-class McpEndpoint(TypedDict, total=False):
- """A URL reference to external content."""
-
- uri: Required[str]
diff --git a/tests/api_resources/test_tool_runtime.py b/tests/api_resources/test_tool_runtime.py
deleted file mode 100644
index 0e8be87b..00000000
--- a/tests/api_resources/test_tool_runtime.py
+++ /dev/null
@@ -1,213 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import (
- ToolInvocationResult,
- ToolRuntimeListToolsResponse,
-)
-
-# pyright: reportDeprecated=false
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestToolRuntime:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @parametrize
- def test_method_invoke_tool(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- tool_runtime = client.tool_runtime.invoke_tool(
- kwargs={"foo": "bar"},
- tool_name="tool_name",
- )
-
- assert_matches_type(ToolInvocationResult, tool_runtime, path=["response"])
-
- @parametrize
- def test_method_invoke_tool_with_all_params(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- tool_runtime = client.tool_runtime.invoke_tool(
- kwargs={"foo": "bar"},
- tool_name="tool_name",
- authorization="authorization",
- )
-
- assert_matches_type(ToolInvocationResult, tool_runtime, path=["response"])
-
- @parametrize
- def test_raw_response_invoke_tool(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = client.tool_runtime.with_raw_response.invoke_tool(
- kwargs={"foo": "bar"},
- tool_name="tool_name",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tool_runtime = response.parse()
- assert_matches_type(ToolInvocationResult, tool_runtime, path=["response"])
-
- @parametrize
- def test_streaming_response_invoke_tool(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with client.tool_runtime.with_streaming_response.invoke_tool(
- kwargs={"foo": "bar"},
- tool_name="tool_name",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tool_runtime = response.parse()
- assert_matches_type(ToolInvocationResult, tool_runtime, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_method_list_tools(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- tool_runtime = client.tool_runtime.list_tools()
-
- assert_matches_type(ToolRuntimeListToolsResponse, tool_runtime, path=["response"])
-
- @parametrize
- def test_method_list_tools_with_all_params(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- tool_runtime = client.tool_runtime.list_tools(
- authorization="authorization",
- mcp_endpoint={"uri": "uri"},
- tool_group_id="tool_group_id",
- )
-
- assert_matches_type(ToolRuntimeListToolsResponse, tool_runtime, path=["response"])
-
- @parametrize
- def test_raw_response_list_tools(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = client.tool_runtime.with_raw_response.list_tools()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tool_runtime = response.parse()
- assert_matches_type(ToolRuntimeListToolsResponse, tool_runtime, path=["response"])
-
- @parametrize
- def test_streaming_response_list_tools(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with client.tool_runtime.with_streaming_response.list_tools() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tool_runtime = response.parse()
- assert_matches_type(ToolRuntimeListToolsResponse, tool_runtime, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncToolRuntime:
- parametrize = pytest.mark.parametrize(
- "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
- )
-
- @parametrize
- async def test_method_invoke_tool(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- tool_runtime = await async_client.tool_runtime.invoke_tool(
- kwargs={"foo": "bar"},
- tool_name="tool_name",
- )
-
- assert_matches_type(ToolInvocationResult, tool_runtime, path=["response"])
-
- @parametrize
- async def test_method_invoke_tool_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- tool_runtime = await async_client.tool_runtime.invoke_tool(
- kwargs={"foo": "bar"},
- tool_name="tool_name",
- authorization="authorization",
- )
-
- assert_matches_type(ToolInvocationResult, tool_runtime, path=["response"])
-
- @parametrize
- async def test_raw_response_invoke_tool(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = await async_client.tool_runtime.with_raw_response.invoke_tool(
- kwargs={"foo": "bar"},
- tool_name="tool_name",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tool_runtime = await response.parse()
- assert_matches_type(ToolInvocationResult, tool_runtime, path=["response"])
-
- @parametrize
- async def test_streaming_response_invoke_tool(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- async with async_client.tool_runtime.with_streaming_response.invoke_tool(
- kwargs={"foo": "bar"},
- tool_name="tool_name",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tool_runtime = await response.parse()
- assert_matches_type(ToolInvocationResult, tool_runtime, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_method_list_tools(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- tool_runtime = await async_client.tool_runtime.list_tools()
-
- assert_matches_type(ToolRuntimeListToolsResponse, tool_runtime, path=["response"])
-
- @parametrize
- async def test_method_list_tools_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- tool_runtime = await async_client.tool_runtime.list_tools(
- authorization="authorization",
- mcp_endpoint={"uri": "uri"},
- tool_group_id="tool_group_id",
- )
-
- assert_matches_type(ToolRuntimeListToolsResponse, tool_runtime, path=["response"])
-
- @parametrize
- async def test_raw_response_list_tools(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = await async_client.tool_runtime.with_raw_response.list_tools()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tool_runtime = await response.parse()
- assert_matches_type(ToolRuntimeListToolsResponse, tool_runtime, path=["response"])
-
- @parametrize
- async def test_streaming_response_list_tools(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- async with async_client.tool_runtime.with_streaming_response.list_tools() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tool_runtime = await response.parse()
- assert_matches_type(ToolRuntimeListToolsResponse, tool_runtime, path=["response"])
-
- assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_toolgroups.py b/tests/api_resources/test_toolgroups.py
deleted file mode 100644
index f5a3eafd..00000000
--- a/tests/api_resources/test_toolgroups.py
+++ /dev/null
@@ -1,362 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import ToolGroup, ToolgroupListResponse
-
-# pyright: reportDeprecated=false
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestToolgroups:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @parametrize
- def test_method_list(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- toolgroup = client.toolgroups.list()
-
- assert_matches_type(ToolgroupListResponse, toolgroup, path=["response"])
-
- @parametrize
- def test_raw_response_list(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = client.toolgroups.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- toolgroup = response.parse()
- assert_matches_type(ToolgroupListResponse, toolgroup, path=["response"])
-
- @parametrize
- def test_streaming_response_list(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with client.toolgroups.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- toolgroup = response.parse()
- assert_matches_type(ToolgroupListResponse, toolgroup, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_method_get(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- toolgroup = client.toolgroups.get(
- "toolgroup_id",
- )
-
- assert_matches_type(ToolGroup, toolgroup, path=["response"])
-
- @parametrize
- def test_raw_response_get(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = client.toolgroups.with_raw_response.get(
- "toolgroup_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- toolgroup = response.parse()
- assert_matches_type(ToolGroup, toolgroup, path=["response"])
-
- @parametrize
- def test_streaming_response_get(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with client.toolgroups.with_streaming_response.get(
- "toolgroup_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- toolgroup = response.parse()
- assert_matches_type(ToolGroup, toolgroup, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_get(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `toolgroup_id` but received ''"):
- client.toolgroups.with_raw_response.get(
- "",
- )
-
- @parametrize
- def test_method_register(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- toolgroup = client.toolgroups.register(
- provider_id="provider_id",
- toolgroup_id="toolgroup_id",
- )
-
- assert toolgroup is None
-
- @parametrize
- def test_method_register_with_all_params(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- toolgroup = client.toolgroups.register(
- provider_id="provider_id",
- toolgroup_id="toolgroup_id",
- args={"foo": "bar"},
- mcp_endpoint={"uri": "uri"},
- )
-
- assert toolgroup is None
-
- @parametrize
- def test_raw_response_register(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = client.toolgroups.with_raw_response.register(
- provider_id="provider_id",
- toolgroup_id="toolgroup_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- toolgroup = response.parse()
- assert toolgroup is None
-
- @parametrize
- def test_streaming_response_register(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with client.toolgroups.with_streaming_response.register(
- provider_id="provider_id",
- toolgroup_id="toolgroup_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- toolgroup = response.parse()
- assert toolgroup is None
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_method_unregister(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- toolgroup = client.toolgroups.unregister(
- "toolgroup_id",
- )
-
- assert toolgroup is None
-
- @parametrize
- def test_raw_response_unregister(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = client.toolgroups.with_raw_response.unregister(
- "toolgroup_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- toolgroup = response.parse()
- assert toolgroup is None
-
- @parametrize
- def test_streaming_response_unregister(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with client.toolgroups.with_streaming_response.unregister(
- "toolgroup_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- toolgroup = response.parse()
- assert toolgroup is None
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_unregister(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `toolgroup_id` but received ''"):
- client.toolgroups.with_raw_response.unregister(
- "",
- )
-
-
-class TestAsyncToolgroups:
- parametrize = pytest.mark.parametrize(
- "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
- )
-
- @parametrize
- async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- toolgroup = await async_client.toolgroups.list()
-
- assert_matches_type(ToolgroupListResponse, toolgroup, path=["response"])
-
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = await async_client.toolgroups.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- toolgroup = await response.parse()
- assert_matches_type(ToolgroupListResponse, toolgroup, path=["response"])
-
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- async with async_client.toolgroups.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- toolgroup = await response.parse()
- assert_matches_type(ToolgroupListResponse, toolgroup, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_method_get(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- toolgroup = await async_client.toolgroups.get(
- "toolgroup_id",
- )
-
- assert_matches_type(ToolGroup, toolgroup, path=["response"])
-
- @parametrize
- async def test_raw_response_get(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = await async_client.toolgroups.with_raw_response.get(
- "toolgroup_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- toolgroup = await response.parse()
- assert_matches_type(ToolGroup, toolgroup, path=["response"])
-
- @parametrize
- async def test_streaming_response_get(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- async with async_client.toolgroups.with_streaming_response.get(
- "toolgroup_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- toolgroup = await response.parse()
- assert_matches_type(ToolGroup, toolgroup, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_get(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `toolgroup_id` but received ''"):
- await async_client.toolgroups.with_raw_response.get(
- "",
- )
-
- @parametrize
- async def test_method_register(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- toolgroup = await async_client.toolgroups.register(
- provider_id="provider_id",
- toolgroup_id="toolgroup_id",
- )
-
- assert toolgroup is None
-
- @parametrize
- async def test_method_register_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- toolgroup = await async_client.toolgroups.register(
- provider_id="provider_id",
- toolgroup_id="toolgroup_id",
- args={"foo": "bar"},
- mcp_endpoint={"uri": "uri"},
- )
-
- assert toolgroup is None
-
- @parametrize
- async def test_raw_response_register(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = await async_client.toolgroups.with_raw_response.register(
- provider_id="provider_id",
- toolgroup_id="toolgroup_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- toolgroup = await response.parse()
- assert toolgroup is None
-
- @parametrize
- async def test_streaming_response_register(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- async with async_client.toolgroups.with_streaming_response.register(
- provider_id="provider_id",
- toolgroup_id="toolgroup_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- toolgroup = await response.parse()
- assert toolgroup is None
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_method_unregister(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- toolgroup = await async_client.toolgroups.unregister(
- "toolgroup_id",
- )
-
- assert toolgroup is None
-
- @parametrize
- async def test_raw_response_unregister(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = await async_client.toolgroups.with_raw_response.unregister(
- "toolgroup_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- toolgroup = await response.parse()
- assert toolgroup is None
-
- @parametrize
- async def test_streaming_response_unregister(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- async with async_client.toolgroups.with_streaming_response.unregister(
- "toolgroup_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- toolgroup = await response.parse()
- assert toolgroup is None
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_unregister(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `toolgroup_id` but received ''"):
- await async_client.toolgroups.with_raw_response.unregister(
- "",
- )
diff --git a/tests/api_resources/test_tools.py b/tests/api_resources/test_tools.py
deleted file mode 100644
index 5a8dfdc2..00000000
--- a/tests/api_resources/test_tools.py
+++ /dev/null
@@ -1,194 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import ToolDef, ToolListResponse
-
-# pyright: reportDeprecated=false
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestTools:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @parametrize
- def test_method_list(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- tool = client.tools.list()
-
- assert_matches_type(ToolListResponse, tool, path=["response"])
-
- @parametrize
- def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- tool = client.tools.list(
- toolgroup_id="toolgroup_id",
- )
-
- assert_matches_type(ToolListResponse, tool, path=["response"])
-
- @parametrize
- def test_raw_response_list(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = client.tools.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tool = response.parse()
- assert_matches_type(ToolListResponse, tool, path=["response"])
-
- @parametrize
- def test_streaming_response_list(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with client.tools.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tool = response.parse()
- assert_matches_type(ToolListResponse, tool, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_method_get(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- tool = client.tools.get(
- "tool_name",
- )
-
- assert_matches_type(ToolDef, tool, path=["response"])
-
- @parametrize
- def test_raw_response_get(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = client.tools.with_raw_response.get(
- "tool_name",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tool = response.parse()
- assert_matches_type(ToolDef, tool, path=["response"])
-
- @parametrize
- def test_streaming_response_get(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with client.tools.with_streaming_response.get(
- "tool_name",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tool = response.parse()
- assert_matches_type(ToolDef, tool, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_get(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `tool_name` but received ''"):
- client.tools.with_raw_response.get(
- "",
- )
-
-
-class TestAsyncTools:
- parametrize = pytest.mark.parametrize(
- "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
- )
-
- @parametrize
- async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- tool = await async_client.tools.list()
-
- assert_matches_type(ToolListResponse, tool, path=["response"])
-
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- tool = await async_client.tools.list(
- toolgroup_id="toolgroup_id",
- )
-
- assert_matches_type(ToolListResponse, tool, path=["response"])
-
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = await async_client.tools.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tool = await response.parse()
- assert_matches_type(ToolListResponse, tool, path=["response"])
-
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- async with async_client.tools.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tool = await response.parse()
- assert_matches_type(ToolListResponse, tool, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_method_get(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- tool = await async_client.tools.get(
- "tool_name",
- )
-
- assert_matches_type(ToolDef, tool, path=["response"])
-
- @parametrize
- async def test_raw_response_get(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = await async_client.tools.with_raw_response.get(
- "tool_name",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tool = await response.parse()
- assert_matches_type(ToolDef, tool, path=["response"])
-
- @parametrize
- async def test_streaming_response_get(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- async with async_client.tools.with_streaming_response.get(
- "tool_name",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tool = await response.parse()
- assert_matches_type(ToolDef, tool, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_get(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `tool_name` but received ''"):
- await async_client.tools.with_raw_response.get(
- "",
- )
From 0e98cfdcf7779ca24ef4dbd7e9e8d9c75fa2a751 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Sat, 28 Mar 2026 00:13:02 +0000
Subject: [PATCH 17/26] chore(internal): update gitignore
From e364f5d36aa83e74c4970028783380f374aa95bb Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Mon, 23 Mar 2026 14:24:54 +0000
Subject: [PATCH 18/26] codegen metadata
---
.stats.yml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/.stats.yml b/.stats.yml
index 5766c12e..2a7d1629 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 94
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-96f8902f19a43855a6ff92a7600f0c44fe59b0bd932e425db3d00cf87d0cc87d.yml
-openapi_spec_hash: 77fb0034945bafbb22c11db8128d6e12
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-5e65a08e9e75a43349095adac379b0ea2fe508538fc9819d5675d9e1f57564de.yml
+openapi_spec_hash: 71eed98516d334012db2f7bbfc16dafb
config_hash: 7d5765272a641656f8231509937663a7
From 87cb87e8ecd52d95b5a375f8b4c00f5837e4feeb Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Sat, 28 Mar 2026 00:14:09 +0000
Subject: [PATCH 19/26] chore(tests): bump steady to v0.19.6
---
scripts/test | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/scripts/test b/scripts/test
index 92bf44f0..64582d4a 100755
--- a/scripts/test
+++ b/scripts/test
@@ -47,7 +47,7 @@ elif ! prism_is_running; then
echo -e "To run the server, pass in the path or url of your OpenAPI"
echo -e "spec to the steady command:"
echo
- echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.19.5 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets${NC}"
+ echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.19.6 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets${NC}"
echo
exit 1
From b096c2ce513a5d2de9a17e7841609feb30d1b0b2 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 24 Mar 2026 15:14:24 +0000
Subject: [PATCH 20/26] chore(ci): skip lint on metadata-only changes
Note that we still want to run tests, as these depend on the metadata.
---
.github/workflows/ci.yml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 4dac4bbc..d7d6fab0 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -20,7 +20,7 @@ jobs:
timeout-minutes: 10
name: lint
runs-on: ${{ github.repository == 'stainless-sdks/llama-stack-client-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
- if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
+ if: (github.event_name == 'push' || github.event.pull_request.head.repo.fork) && (github.event_name != 'push' || github.event.head_commit.message != 'codegen metadata')
steps:
- uses: actions/checkout@v6
@@ -36,7 +36,7 @@ jobs:
run: ./scripts/lint
build:
- if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
+ if: (github.event_name == 'push' || github.event.pull_request.head.repo.fork) && (github.event_name != 'push' || github.event.head_commit.message != 'codegen metadata')
timeout-minutes: 10
name: build
permissions:
From 10f6ed745b38d89be2d6a5eb007427b015e84e23 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Sat, 28 Mar 2026 00:15:30 +0000
Subject: [PATCH 21/26] chore(tests): bump steady to v0.19.7
---
scripts/test | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/scripts/test b/scripts/test
index 64582d4a..ed1e0ff0 100755
--- a/scripts/test
+++ b/scripts/test
@@ -47,7 +47,7 @@ elif ! prism_is_running; then
echo -e "To run the server, pass in the path or url of your OpenAPI"
echo -e "spec to the steady command:"
echo
- echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.19.6 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets${NC}"
+ echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.19.7 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets${NC}"
echo
exit 1
From f5c27db9d2716098a116d516cc5ad673ee621988 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 25 Mar 2026 15:41:04 +0000
Subject: [PATCH 22/26] refactor!: rename agents API to responses API
---
.stats.yml | 4 ++--
src/llama_stack_client/_client.py | 6 ------
.../resources/responses/input_items.py | 4 ----
.../resources/responses/responses.py | 10 ----------
4 files changed, 2 insertions(+), 22 deletions(-)
diff --git a/.stats.yml b/.stats.yml
index 2a7d1629..39b8c0e2 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 94
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-5e65a08e9e75a43349095adac379b0ea2fe508538fc9819d5675d9e1f57564de.yml
-openapi_spec_hash: 71eed98516d334012db2f7bbfc16dafb
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-cf1401782e1b8568e067f180b365bc6eb17842141f34a716ab1436247a173b2e.yml
+openapi_spec_hash: abf916a5ea9bcc53c2386a181859ba31
config_hash: 7d5765272a641656f8231509937663a7
diff --git a/src/llama_stack_client/_client.py b/src/llama_stack_client/_client.py
index ba967d59..de6801c6 100644
--- a/src/llama_stack_client/_client.py
+++ b/src/llama_stack_client/_client.py
@@ -156,7 +156,6 @@ def __init__(
@cached_property
def responses(self) -> ResponsesResource:
- """APIs for creating and interacting with agentic systems."""
from .resources.responses import ResponsesResource
return ResponsesResource(self)
@@ -497,7 +496,6 @@ def __init__(
@cached_property
def responses(self) -> AsyncResponsesResource:
- """APIs for creating and interacting with agentic systems."""
from .resources.responses import AsyncResponsesResource
return AsyncResponsesResource(self)
@@ -785,7 +783,6 @@ def __init__(self, client: LlamaStackClient) -> None:
@cached_property
def responses(self) -> responses.ResponsesResourceWithRawResponse:
- """APIs for creating and interacting with agentic systems."""
from .resources.responses import ResponsesResourceWithRawResponse
return ResponsesResourceWithRawResponse(self._client.responses)
@@ -959,7 +956,6 @@ def __init__(self, client: AsyncLlamaStackClient) -> None:
@cached_property
def responses(self) -> responses.AsyncResponsesResourceWithRawResponse:
- """APIs for creating and interacting with agentic systems."""
from .resources.responses import AsyncResponsesResourceWithRawResponse
return AsyncResponsesResourceWithRawResponse(self._client.responses)
@@ -1133,7 +1129,6 @@ def __init__(self, client: LlamaStackClient) -> None:
@cached_property
def responses(self) -> responses.ResponsesResourceWithStreamingResponse:
- """APIs for creating and interacting with agentic systems."""
from .resources.responses import ResponsesResourceWithStreamingResponse
return ResponsesResourceWithStreamingResponse(self._client.responses)
@@ -1307,7 +1302,6 @@ def __init__(self, client: AsyncLlamaStackClient) -> None:
@cached_property
def responses(self) -> responses.AsyncResponsesResourceWithStreamingResponse:
- """APIs for creating and interacting with agentic systems."""
from .resources.responses import AsyncResponsesResourceWithStreamingResponse
return AsyncResponsesResourceWithStreamingResponse(self._client.responses)
diff --git a/src/llama_stack_client/resources/responses/input_items.py b/src/llama_stack_client/resources/responses/input_items.py
index 3daa5441..84433369 100644
--- a/src/llama_stack_client/resources/responses/input_items.py
+++ b/src/llama_stack_client/resources/responses/input_items.py
@@ -31,8 +31,6 @@
class InputItemsResource(SyncAPIResource):
- """APIs for creating and interacting with agentic systems."""
-
@cached_property
def with_raw_response(self) -> InputItemsResourceWithRawResponse:
"""
@@ -131,8 +129,6 @@ def list(
class AsyncInputItemsResource(AsyncAPIResource):
- """APIs for creating and interacting with agentic systems."""
-
@cached_property
def with_raw_response(self) -> AsyncInputItemsResourceWithRawResponse:
"""
diff --git a/src/llama_stack_client/resources/responses/responses.py b/src/llama_stack_client/resources/responses/responses.py
index 070cbb8e..6bc14161 100644
--- a/src/llama_stack_client/resources/responses/responses.py
+++ b/src/llama_stack_client/resources/responses/responses.py
@@ -44,11 +44,8 @@
class ResponsesResource(SyncAPIResource):
- """APIs for creating and interacting with agentic systems."""
-
@cached_property
def input_items(self) -> InputItemsResource:
- """APIs for creating and interacting with agentic systems."""
return InputItemsResource(self._client)
@cached_property
@@ -720,11 +717,8 @@ def delete(
class AsyncResponsesResource(AsyncAPIResource):
- """APIs for creating and interacting with agentic systems."""
-
@cached_property
def input_items(self) -> AsyncInputItemsResource:
- """APIs for creating and interacting with agentic systems."""
return AsyncInputItemsResource(self._client)
@cached_property
@@ -1414,7 +1408,6 @@ def __init__(self, responses: ResponsesResource) -> None:
@cached_property
def input_items(self) -> InputItemsResourceWithRawResponse:
- """APIs for creating and interacting with agentic systems."""
return InputItemsResourceWithRawResponse(self._responses.input_items)
@@ -1437,7 +1430,6 @@ def __init__(self, responses: AsyncResponsesResource) -> None:
@cached_property
def input_items(self) -> AsyncInputItemsResourceWithRawResponse:
- """APIs for creating and interacting with agentic systems."""
return AsyncInputItemsResourceWithRawResponse(self._responses.input_items)
@@ -1460,7 +1452,6 @@ def __init__(self, responses: ResponsesResource) -> None:
@cached_property
def input_items(self) -> InputItemsResourceWithStreamingResponse:
- """APIs for creating and interacting with agentic systems."""
return InputItemsResourceWithStreamingResponse(self._responses.input_items)
@@ -1483,5 +1474,4 @@ def __init__(self, responses: AsyncResponsesResource) -> None:
@cached_property
def input_items(self) -> AsyncInputItemsResourceWithStreamingResponse:
- """APIs for creating and interacting with agentic systems."""
return AsyncInputItemsResourceWithStreamingResponse(self._responses.input_items)
From dad9f546400133d34a0cd650a227800be78b0d1f Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 26 Mar 2026 15:57:50 +0000
Subject: [PATCH 23/26] feat!: eliminate GET /chat/completions/{completion_id}
conformance issues
---
.stats.yml | 4 +-
.../types/chat/completion_create_params.py | 13 ++--
.../types/chat/completion_create_response.py | 53 ++++++++++++----
.../types/chat/completion_list_response.py | 61 +++++++++++++------
.../chat/completion_retrieve_response.py | 61 +++++++++++++------
.../types/chat_completion_chunk.py | 13 ++--
.../types/completion_create_response.py | 9 +--
.../types/safety_run_shield_params.py | 13 ++--
8 files changed, 146 insertions(+), 81 deletions(-)
diff --git a/.stats.yml b/.stats.yml
index 39b8c0e2..fb448f65 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 94
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-cf1401782e1b8568e067f180b365bc6eb17842141f34a716ab1436247a173b2e.yml
-openapi_spec_hash: abf916a5ea9bcc53c2386a181859ba31
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-110b5cc180c866a86135e793e00c3ce5aad90b2e46f74980f61f68f997722591.yml
+openapi_spec_hash: 1e49b4df5864bb56a155e0950dd30241
config_hash: 7d5765272a641656f8231509937663a7
diff --git a/src/llama_stack_client/types/chat/completion_create_params.py b/src/llama_stack_client/types/chat/completion_create_params.py
index af533b6a..58e48a9f 100644
--- a/src/llama_stack_client/types/chat/completion_create_params.py
+++ b/src/llama_stack_client/types/chat/completion_create_params.py
@@ -256,7 +256,7 @@ class MessageOpenAIAssistantMessageParamInputContentListOpenAIChatCompletionCont
class MessageOpenAIAssistantMessageParamInputToolCallFunction(TypedDict, total=False):
- """Function call details for OpenAI-compatible tool calls."""
+ """Function call details."""
arguments: Required[str]
"""Arguments to pass to the function as a JSON string."""
@@ -268,16 +268,13 @@ class MessageOpenAIAssistantMessageParamInputToolCallFunction(TypedDict, total=F
class MessageOpenAIAssistantMessageParamInputToolCall(TypedDict, total=False):
"""Tool call specification for OpenAI-compatible chat completion responses."""
- id: Optional[str]
+ id: Required[str]
"""Unique identifier for the tool call."""
- function: Optional[MessageOpenAIAssistantMessageParamInputToolCallFunction]
- """Function call details for OpenAI-compatible tool calls."""
+ function: Required[MessageOpenAIAssistantMessageParamInputToolCallFunction]
+ """Function call details."""
- index: Optional[int]
- """Index of the tool call in the list."""
-
- type: Literal["function"]
+ type: Required[Literal["function"]]
"""Must be 'function' to identify this as a function call."""
diff --git a/src/llama_stack_client/types/chat/completion_create_response.py b/src/llama_stack_client/types/chat/completion_create_response.py
index a76b557f..0d20c058 100644
--- a/src/llama_stack_client/types/chat/completion_create_response.py
+++ b/src/llama_stack_client/types/chat/completion_create_response.py
@@ -6,9 +6,10 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Dict, List, Optional
-from typing_extensions import Literal
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+from ..._utils import PropertyInfo
from ..._models import BaseModel
__all__ = [
@@ -18,6 +19,9 @@
"ChoiceMessageFunctionCall",
"ChoiceMessageToolCall",
"ChoiceMessageToolCallFunction",
+ "ChoiceMessageToolCallFunctionFunction",
+ "ChoiceMessageToolCallCustom",
+ "ChoiceMessageToolCallCustomCustom",
"ChoiceLogprobs",
"ChoiceLogprobsContent",
"ChoiceLogprobsContentTopLogprob",
@@ -39,8 +43,8 @@ class ChoiceMessageFunctionCall(BaseModel):
"""Name of the function to call."""
-class ChoiceMessageToolCallFunction(BaseModel):
- """Function call details for OpenAI-compatible tool calls."""
+class ChoiceMessageToolCallFunctionFunction(BaseModel):
+ """Function call details."""
arguments: str
"""Arguments to pass to the function as a JSON string."""
@@ -49,22 +53,47 @@ class ChoiceMessageToolCallFunction(BaseModel):
"""Name of the function to call."""
-class ChoiceMessageToolCall(BaseModel):
+class ChoiceMessageToolCallFunction(BaseModel):
"""Tool call specification for OpenAI-compatible chat completion responses."""
- id: Optional[str] = None
+ id: str
"""Unique identifier for the tool call."""
- function: Optional[ChoiceMessageToolCallFunction] = None
- """Function call details for OpenAI-compatible tool calls."""
+ function: ChoiceMessageToolCallFunctionFunction
+ """Function call details."""
- index: Optional[int] = None
- """Index of the tool call in the list."""
-
- type: Optional[Literal["function"]] = None
+ type: Literal["function"]
"""Must be 'function' to identify this as a function call."""
+class ChoiceMessageToolCallCustomCustom(BaseModel):
+ """The custom tool that the model called."""
+
+ input: str
+ """The input for the custom tool call generated by the model."""
+
+ name: str
+ """The name of the custom tool to call."""
+
+
+class ChoiceMessageToolCallCustom(BaseModel):
+ """A call to a custom tool created by the model."""
+
+ id: str
+ """The ID of the tool call."""
+
+ custom: ChoiceMessageToolCallCustomCustom
+ """The custom tool that the model called."""
+
+ type: Literal["custom"]
+ """The type of the tool. Always 'custom'."""
+
+
+ChoiceMessageToolCall: TypeAlias = Annotated[
+ Union[ChoiceMessageToolCallFunction, ChoiceMessageToolCallCustom], PropertyInfo(discriminator="type")
+]
+
+
class ChoiceMessage(BaseModel):
"""The message from the model."""
diff --git a/src/llama_stack_client/types/chat/completion_list_response.py b/src/llama_stack_client/types/chat/completion_list_response.py
index 1f27d036..b02fd8e7 100644
--- a/src/llama_stack_client/types/chat/completion_list_response.py
+++ b/src/llama_stack_client/types/chat/completion_list_response.py
@@ -22,6 +22,9 @@
"DataChoiceMessageFunctionCall",
"DataChoiceMessageToolCall",
"DataChoiceMessageToolCallFunction",
+ "DataChoiceMessageToolCallFunctionFunction",
+ "DataChoiceMessageToolCallCustom",
+ "DataChoiceMessageToolCallCustomCustom",
"DataChoiceLogprobs",
"DataChoiceLogprobsContent",
"DataChoiceLogprobsContentTopLogprob",
@@ -61,8 +64,8 @@ class DataChoiceMessageFunctionCall(BaseModel):
"""Name of the function to call."""
-class DataChoiceMessageToolCallFunction(BaseModel):
- """Function call details for OpenAI-compatible tool calls."""
+class DataChoiceMessageToolCallFunctionFunction(BaseModel):
+ """Function call details."""
arguments: str
"""Arguments to pass to the function as a JSON string."""
@@ -71,22 +74,47 @@ class DataChoiceMessageToolCallFunction(BaseModel):
"""Name of the function to call."""
-class DataChoiceMessageToolCall(BaseModel):
+class DataChoiceMessageToolCallFunction(BaseModel):
"""Tool call specification for OpenAI-compatible chat completion responses."""
- id: Optional[str] = None
+ id: str
"""Unique identifier for the tool call."""
- function: Optional[DataChoiceMessageToolCallFunction] = None
- """Function call details for OpenAI-compatible tool calls."""
+ function: DataChoiceMessageToolCallFunctionFunction
+ """Function call details."""
- index: Optional[int] = None
- """Index of the tool call in the list."""
-
- type: Optional[Literal["function"]] = None
+ type: Literal["function"]
"""Must be 'function' to identify this as a function call."""
+class DataChoiceMessageToolCallCustomCustom(BaseModel):
+ """The custom tool that the model called."""
+
+ input: str
+ """The input for the custom tool call generated by the model."""
+
+ name: str
+ """The name of the custom tool to call."""
+
+
+class DataChoiceMessageToolCallCustom(BaseModel):
+ """A call to a custom tool created by the model."""
+
+ id: str
+ """The ID of the tool call."""
+
+ custom: DataChoiceMessageToolCallCustomCustom
+ """The custom tool that the model called."""
+
+ type: Literal["custom"]
+ """The type of the tool. Always 'custom'."""
+
+
+DataChoiceMessageToolCall: TypeAlias = Annotated[
+ Union[DataChoiceMessageToolCallFunction, DataChoiceMessageToolCallCustom], PropertyInfo(discriminator="type")
+]
+
+
class DataChoiceMessage(BaseModel):
"""The message from the model."""
@@ -332,7 +360,7 @@ class DataInputMessageOpenAIAssistantMessageParamOutputContentListOpenAIChatComp
class DataInputMessageOpenAIAssistantMessageParamOutputToolCallFunction(BaseModel):
- """Function call details for OpenAI-compatible tool calls."""
+ """Function call details."""
arguments: str
"""Arguments to pass to the function as a JSON string."""
@@ -344,16 +372,13 @@ class DataInputMessageOpenAIAssistantMessageParamOutputToolCallFunction(BaseMode
class DataInputMessageOpenAIAssistantMessageParamOutputToolCall(BaseModel):
"""Tool call specification for OpenAI-compatible chat completion responses."""
- id: Optional[str] = None
+ id: str
"""Unique identifier for the tool call."""
- function: Optional[DataInputMessageOpenAIAssistantMessageParamOutputToolCallFunction] = None
- """Function call details for OpenAI-compatible tool calls."""
-
- index: Optional[int] = None
- """Index of the tool call in the list."""
+ function: DataInputMessageOpenAIAssistantMessageParamOutputToolCallFunction
+ """Function call details."""
- type: Optional[Literal["function"]] = None
+ type: Literal["function"]
"""Must be 'function' to identify this as a function call."""
diff --git a/src/llama_stack_client/types/chat/completion_retrieve_response.py b/src/llama_stack_client/types/chat/completion_retrieve_response.py
index b3d783c9..7488f69e 100644
--- a/src/llama_stack_client/types/chat/completion_retrieve_response.py
+++ b/src/llama_stack_client/types/chat/completion_retrieve_response.py
@@ -21,6 +21,9 @@
"ChoiceMessageFunctionCall",
"ChoiceMessageToolCall",
"ChoiceMessageToolCallFunction",
+ "ChoiceMessageToolCallFunctionFunction",
+ "ChoiceMessageToolCallCustom",
+ "ChoiceMessageToolCallCustomCustom",
"ChoiceLogprobs",
"ChoiceLogprobsContent",
"ChoiceLogprobsContentTopLogprob",
@@ -60,8 +63,8 @@ class ChoiceMessageFunctionCall(BaseModel):
"""Name of the function to call."""
-class ChoiceMessageToolCallFunction(BaseModel):
- """Function call details for OpenAI-compatible tool calls."""
+class ChoiceMessageToolCallFunctionFunction(BaseModel):
+ """Function call details."""
arguments: str
"""Arguments to pass to the function as a JSON string."""
@@ -70,22 +73,47 @@ class ChoiceMessageToolCallFunction(BaseModel):
"""Name of the function to call."""
-class ChoiceMessageToolCall(BaseModel):
+class ChoiceMessageToolCallFunction(BaseModel):
"""Tool call specification for OpenAI-compatible chat completion responses."""
- id: Optional[str] = None
+ id: str
"""Unique identifier for the tool call."""
- function: Optional[ChoiceMessageToolCallFunction] = None
- """Function call details for OpenAI-compatible tool calls."""
+ function: ChoiceMessageToolCallFunctionFunction
+ """Function call details."""
- index: Optional[int] = None
- """Index of the tool call in the list."""
-
- type: Optional[Literal["function"]] = None
+ type: Literal["function"]
"""Must be 'function' to identify this as a function call."""
+class ChoiceMessageToolCallCustomCustom(BaseModel):
+ """The custom tool that the model called."""
+
+ input: str
+ """The input for the custom tool call generated by the model."""
+
+ name: str
+ """The name of the custom tool to call."""
+
+
+class ChoiceMessageToolCallCustom(BaseModel):
+ """A call to a custom tool created by the model."""
+
+ id: str
+ """The ID of the tool call."""
+
+ custom: ChoiceMessageToolCallCustomCustom
+ """The custom tool that the model called."""
+
+ type: Literal["custom"]
+ """The type of the tool. Always 'custom'."""
+
+
+ChoiceMessageToolCall: TypeAlias = Annotated[
+ Union[ChoiceMessageToolCallFunction, ChoiceMessageToolCallCustom], PropertyInfo(discriminator="type")
+]
+
+
class ChoiceMessage(BaseModel):
"""The message from the model."""
@@ -329,7 +357,7 @@ class InputMessageOpenAIAssistantMessageParamOutputContentListOpenAIChatCompleti
class InputMessageOpenAIAssistantMessageParamOutputToolCallFunction(BaseModel):
- """Function call details for OpenAI-compatible tool calls."""
+ """Function call details."""
arguments: str
"""Arguments to pass to the function as a JSON string."""
@@ -341,16 +369,13 @@ class InputMessageOpenAIAssistantMessageParamOutputToolCallFunction(BaseModel):
class InputMessageOpenAIAssistantMessageParamOutputToolCall(BaseModel):
"""Tool call specification for OpenAI-compatible chat completion responses."""
- id: Optional[str] = None
+ id: str
"""Unique identifier for the tool call."""
- function: Optional[InputMessageOpenAIAssistantMessageParamOutputToolCallFunction] = None
- """Function call details for OpenAI-compatible tool calls."""
-
- index: Optional[int] = None
- """Index of the tool call in the list."""
+ function: InputMessageOpenAIAssistantMessageParamOutputToolCallFunction
+ """Function call details."""
- type: Optional[Literal["function"]] = None
+ type: Literal["function"]
"""Must be 'function' to identify this as a function call."""
diff --git a/src/llama_stack_client/types/chat_completion_chunk.py b/src/llama_stack_client/types/chat_completion_chunk.py
index 69b6a4ef..3b3190a8 100644
--- a/src/llama_stack_client/types/chat_completion_chunk.py
+++ b/src/llama_stack_client/types/chat_completion_chunk.py
@@ -29,7 +29,7 @@
class ChoiceDeltaToolCallFunction(BaseModel):
- """Function call details for OpenAI-compatible tool calls."""
+ """Function call details."""
arguments: str
"""Arguments to pass to the function as a JSON string."""
@@ -41,16 +41,13 @@ class ChoiceDeltaToolCallFunction(BaseModel):
class ChoiceDeltaToolCall(BaseModel):
"""Tool call specification for OpenAI-compatible chat completion responses."""
- id: Optional[str] = None
+ id: str
"""Unique identifier for the tool call."""
- function: Optional[ChoiceDeltaToolCallFunction] = None
- """Function call details for OpenAI-compatible tool calls."""
-
- index: Optional[int] = None
- """Index of the tool call in the list."""
+ function: ChoiceDeltaToolCallFunction
+ """Function call details."""
- type: Optional[Literal["function"]] = None
+ type: Literal["function"]
"""Must be 'function' to identify this as a function call."""
diff --git a/src/llama_stack_client/types/completion_create_response.py b/src/llama_stack_client/types/completion_create_response.py
index 579c4897..30e42294 100644
--- a/src/llama_stack_client/types/completion_create_response.py
+++ b/src/llama_stack_client/types/completion_create_response.py
@@ -89,9 +89,7 @@ class ChoiceLogprobsRefusal(BaseModel):
class ChoiceLogprobs(BaseModel):
- """
- The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response.
- """
+ """The log probabilities for the tokens in the choice."""
content: Optional[List[ChoiceLogprobsContent]] = None
"""The log probabilities for the tokens in the message."""
@@ -113,10 +111,7 @@ class Choice(BaseModel):
"""The text of the choice."""
logprobs: Optional[ChoiceLogprobs] = None
- """
- The log probabilities for the tokens in the message from an OpenAI-compatible
- chat completion response.
- """
+ """The log probabilities for the tokens in the choice."""
class CompletionCreateResponse(BaseModel):
diff --git a/src/llama_stack_client/types/safety_run_shield_params.py b/src/llama_stack_client/types/safety_run_shield_params.py
index 150579c1..70b52b84 100644
--- a/src/llama_stack_client/types/safety_run_shield_params.py
+++ b/src/llama_stack_client/types/safety_run_shield_params.py
@@ -175,7 +175,7 @@ class MessageOpenAIAssistantMessageParamInputContentListOpenAIChatCompletionCont
class MessageOpenAIAssistantMessageParamInputToolCallFunction(TypedDict, total=False):
- """Function call details for OpenAI-compatible tool calls."""
+ """Function call details."""
arguments: Required[str]
"""Arguments to pass to the function as a JSON string."""
@@ -187,16 +187,13 @@ class MessageOpenAIAssistantMessageParamInputToolCallFunction(TypedDict, total=F
class MessageOpenAIAssistantMessageParamInputToolCall(TypedDict, total=False):
"""Tool call specification for OpenAI-compatible chat completion responses."""
- id: Optional[str]
+ id: Required[str]
"""Unique identifier for the tool call."""
- function: Optional[MessageOpenAIAssistantMessageParamInputToolCallFunction]
- """Function call details for OpenAI-compatible tool calls."""
+ function: Required[MessageOpenAIAssistantMessageParamInputToolCallFunction]
+ """Function call details."""
- index: Optional[int]
- """Index of the tool call in the list."""
-
- type: Literal["function"]
+ type: Required[Literal["function"]]
"""Must be 'function' to identify this as a function call."""
From 6694121eee689fb7033704bad2b698a4640e2431 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Sat, 28 Mar 2026 00:16:14 +0000
Subject: [PATCH 24/26] feat(internal): implement indices array format for
query and form serialization
---
scripts/test | 2 +-
src/llama_stack_client/_qs.py | 5 ++++-
2 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/scripts/test b/scripts/test
index ed1e0ff0..7772d870 100755
--- a/scripts/test
+++ b/scripts/test
@@ -47,7 +47,7 @@ elif ! prism_is_running; then
echo -e "To run the server, pass in the path or url of your OpenAPI"
echo -e "spec to the steady command:"
echo
- echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.19.7 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-form-array-format=comma --validator-query-array-format=comma --validator-form-object-format=brackets --validator-query-object-format=brackets${NC}"
+ echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.19.7 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets${NC}"
echo
exit 1
diff --git a/src/llama_stack_client/_qs.py b/src/llama_stack_client/_qs.py
index 1bb860d0..1a49e008 100644
--- a/src/llama_stack_client/_qs.py
+++ b/src/llama_stack_client/_qs.py
@@ -107,7 +107,10 @@ def _stringify_item(
items.extend(self._stringify_item(key, item, opts))
return items
elif array_format == "indices":
- raise NotImplementedError("The array indices format is not supported yet")
+ items = []
+ for i, item in enumerate(value):
+ items.extend(self._stringify_item(f"{key}[{i}]", item, opts))
+ return items
elif array_format == "brackets":
items = []
key = key + "[]"
From d9bc91afecb64ec27b97d37699d5ff6c1222d369 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 27 Mar 2026 13:16:26 +0000
Subject: [PATCH 25/26] feat(responses): add cancel endpoint for background
responses
---
.stats.yml | 4 +--
.../types/chat/completion_create_params.py | 2 ++
.../types/chat/completion_list_response.py | 4 +++
.../chat/completion_retrieve_response.py | 4 +++
.../types/conversation_create_params.py | 6 ++++
.../types/conversations/item_create_params.py | 6 ++++
.../conversations/item_create_response.py | 6 ++++
.../types/conversations/item_get_response.py | 6 ++++
.../types/conversations/item_list_response.py | 6 ++++
.../types/response_create_params.py | 6 ++++
.../types/response_list_response.py | 14 +++++++-
.../types/response_object.py | 8 ++++-
.../types/response_object_stream.py | 36 +++++++++++++++++++
.../responses/input_item_list_response.py | 6 ++++
.../types/safety_run_shield_params.py | 2 ++
15 files changed, 112 insertions(+), 4 deletions(-)
diff --git a/.stats.yml b/.stats.yml
index fb448f65..df5cadf3 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 94
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-110b5cc180c866a86135e793e00c3ce5aad90b2e46f74980f61f68f997722591.yml
-openapi_spec_hash: 1e49b4df5864bb56a155e0950dd30241
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-ce0519da94b80140cbcd7c35fa41c78daa192530a7525b3452f2a0b2e998cfc8.yml
+openapi_spec_hash: 8a9ec9c7c3a1216ec0e8580ad598a529
config_hash: 7d5765272a641656f8231509937663a7
diff --git a/src/llama_stack_client/types/chat/completion_create_params.py b/src/llama_stack_client/types/chat/completion_create_params.py
index 58e48a9f..605e1faa 100644
--- a/src/llama_stack_client/types/chat/completion_create_params.py
+++ b/src/llama_stack_client/types/chat/completion_create_params.py
@@ -179,6 +179,8 @@ class MessageOpenAIUserMessageParamInputContentListOpenAIChatCompletionContentPa
class MessageOpenAIUserMessageParamInputContentListOpenAIChatCompletionContentPartTextParamOpenAIChatCompletionContentPartImageParamOpenAIFileOpenAIFile(
TypedDict, total=False
):
+ """File content part for OpenAI-compatible chat completion messages."""
+
file: Required[
MessageOpenAIUserMessageParamInputContentListOpenAIChatCompletionContentPartTextParamOpenAIChatCompletionContentPartImageParamOpenAIFileOpenAIFileFile
]
diff --git a/src/llama_stack_client/types/chat/completion_list_response.py b/src/llama_stack_client/types/chat/completion_list_response.py
index b02fd8e7..aa7cf955 100644
--- a/src/llama_stack_client/types/chat/completion_list_response.py
+++ b/src/llama_stack_client/types/chat/completion_list_response.py
@@ -286,6 +286,8 @@ class DataInputMessageOpenAIUserMessageParamOutputContentListOpenAIChatCompletio
class DataInputMessageOpenAIUserMessageParamOutputContentListOpenAIChatCompletionContentPartTextParamOpenAIChatCompletionContentPartImageParamOpenAIFileOpenAIFile(
BaseModel
):
+ """File content part for OpenAI-compatible chat completion messages."""
+
file: DataInputMessageOpenAIUserMessageParamOutputContentListOpenAIChatCompletionContentPartTextParamOpenAIChatCompletionContentPartImageParamOpenAIFileOpenAIFileFile
"""File specification."""
@@ -512,6 +514,8 @@ class DataUsage(BaseModel):
class Data(BaseModel):
+ """Chat completion response extended with the original input messages."""
+
id: str
"""The ID of the chat completion."""
diff --git a/src/llama_stack_client/types/chat/completion_retrieve_response.py b/src/llama_stack_client/types/chat/completion_retrieve_response.py
index 7488f69e..4ea2d02c 100644
--- a/src/llama_stack_client/types/chat/completion_retrieve_response.py
+++ b/src/llama_stack_client/types/chat/completion_retrieve_response.py
@@ -285,6 +285,8 @@ class InputMessageOpenAIUserMessageParamOutputContentListOpenAIChatCompletionCon
class InputMessageOpenAIUserMessageParamOutputContentListOpenAIChatCompletionContentPartTextParamOpenAIChatCompletionContentPartImageParamOpenAIFileOpenAIFile(
BaseModel
):
+ """File content part for OpenAI-compatible chat completion messages."""
+
file: InputMessageOpenAIUserMessageParamOutputContentListOpenAIChatCompletionContentPartTextParamOpenAIChatCompletionContentPartImageParamOpenAIFileOpenAIFileFile
"""File specification."""
@@ -509,6 +511,8 @@ class Usage(BaseModel):
class CompletionRetrieveResponse(BaseModel):
+ """Chat completion response extended with the original input messages."""
+
id: str
"""The ID of the chat completion."""
diff --git a/src/llama_stack_client/types/conversation_create_params.py b/src/llama_stack_client/types/conversation_create_params.py
index 02f028c7..dfaae93e 100644
--- a/src/llama_stack_client/types/conversation_create_params.py
+++ b/src/llama_stack_client/types/conversation_create_params.py
@@ -136,6 +136,8 @@ class ItemOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageConten
class ItemOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationContainerFileCitation(
TypedDict, total=False
):
+ """Container file citation annotation referencing a file within a container."""
+
container_id: Required[str]
end_index: Required[int]
@@ -152,6 +154,8 @@ class ItemOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageConten
class ItemOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationFilePath(
TypedDict, total=False
):
+ """File path annotation referencing a generated file in response content."""
+
file_id: Required[str]
index: Required[int]
@@ -211,6 +215,8 @@ class ItemOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageConten
class ItemOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInput(
TypedDict, total=False
):
+ """Text content within an output message of an OpenAI response."""
+
text: Required[str]
annotations: Iterable[
diff --git a/src/llama_stack_client/types/conversations/item_create_params.py b/src/llama_stack_client/types/conversations/item_create_params.py
index 6336be36..22d49519 100644
--- a/src/llama_stack_client/types/conversations/item_create_params.py
+++ b/src/llama_stack_client/types/conversations/item_create_params.py
@@ -136,6 +136,8 @@ class ItemOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageConten
class ItemOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationContainerFileCitation(
TypedDict, total=False
):
+ """Container file citation annotation referencing a file within a container."""
+
container_id: Required[str]
end_index: Required[int]
@@ -152,6 +154,8 @@ class ItemOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageConten
class ItemOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationFilePath(
TypedDict, total=False
):
+ """File path annotation referencing a generated file in response content."""
+
file_id: Required[str]
index: Required[int]
@@ -211,6 +215,8 @@ class ItemOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageConten
class ItemOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInput(
TypedDict, total=False
):
+ """Text content within an output message of an OpenAI response."""
+
text: Required[str]
annotations: Iterable[
diff --git a/src/llama_stack_client/types/conversations/item_create_response.py b/src/llama_stack_client/types/conversations/item_create_response.py
index c8a167d1..c40c4d60 100644
--- a/src/llama_stack_client/types/conversations/item_create_response.py
+++ b/src/llama_stack_client/types/conversations/item_create_response.py
@@ -130,6 +130,8 @@ class DataOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageConte
class DataOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationContainerFileCitation(
BaseModel
):
+ """Container file citation annotation referencing a file within a container."""
+
container_id: str
end_index: int
@@ -146,6 +148,8 @@ class DataOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageConte
class DataOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationFilePath(
BaseModel
):
+ """File path annotation referencing a generated file in response content."""
+
file_id: str
index: int
@@ -208,6 +212,8 @@ class DataOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageConte
class DataOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutput(
BaseModel
):
+ """Text content within an output message of an OpenAI response."""
+
text: str
annotations: Optional[
diff --git a/src/llama_stack_client/types/conversations/item_get_response.py b/src/llama_stack_client/types/conversations/item_get_response.py
index 8419170b..069f1d05 100644
--- a/src/llama_stack_client/types/conversations/item_get_response.py
+++ b/src/llama_stack_client/types/conversations/item_get_response.py
@@ -129,6 +129,8 @@ class OpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOu
class OpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationContainerFileCitation(
BaseModel
):
+ """Container file citation annotation referencing a file within a container."""
+
container_id: str
end_index: int
@@ -145,6 +147,8 @@ class OpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOu
class OpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationFilePath(
BaseModel
):
+ """File path annotation referencing a generated file in response content."""
+
file_id: str
index: int
@@ -207,6 +211,8 @@ class OpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOu
class OpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutput(
BaseModel
):
+ """Text content within an output message of an OpenAI response."""
+
text: str
annotations: Optional[
diff --git a/src/llama_stack_client/types/conversations/item_list_response.py b/src/llama_stack_client/types/conversations/item_list_response.py
index 1a8b4695..c87d3e9c 100644
--- a/src/llama_stack_client/types/conversations/item_list_response.py
+++ b/src/llama_stack_client/types/conversations/item_list_response.py
@@ -129,6 +129,8 @@ class OpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOu
class OpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationContainerFileCitation(
BaseModel
):
+ """Container file citation annotation referencing a file within a container."""
+
container_id: str
end_index: int
@@ -145,6 +147,8 @@ class OpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOu
class OpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationFilePath(
BaseModel
):
+ """File path annotation referencing a generated file in response content."""
+
file_id: str
index: int
@@ -207,6 +211,8 @@ class OpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOu
class OpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutput(
BaseModel
):
+ """Text content within an output message of an OpenAI response."""
+
text: str
annotations: Optional[
diff --git a/src/llama_stack_client/types/response_create_params.py b/src/llama_stack_client/types/response_create_params.py
index 7c08f512..59886a69 100644
--- a/src/llama_stack_client/types/response_create_params.py
+++ b/src/llama_stack_client/types/response_create_params.py
@@ -283,6 +283,8 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationContainerFileCitation(
TypedDict, total=False
):
+ """Container file citation annotation referencing a file within a container."""
+
container_id: Required[str]
end_index: Required[int]
@@ -299,6 +301,8 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationFilePath(
TypedDict, total=False
):
+ """File path annotation referencing a generated file in response content."""
+
file_id: Required[str]
index: Required[int]
@@ -358,6 +362,8 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInput(
TypedDict, total=False
):
+ """Text content within an output message of an OpenAI response."""
+
text: Required[str]
annotations: Iterable[
diff --git a/src/llama_stack_client/types/response_list_response.py b/src/llama_stack_client/types/response_list_response.py
index b16b10be..d09d7110 100644
--- a/src/llama_stack_client/types/response_list_response.py
+++ b/src/llama_stack_client/types/response_list_response.py
@@ -184,6 +184,8 @@ class InputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageCont
class InputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationContainerFileCitation(
BaseModel
):
+ """Container file citation annotation referencing a file within a container."""
+
container_id: str
end_index: int
@@ -200,6 +202,8 @@ class InputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageCont
class InputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationFilePath(
BaseModel
):
+ """File path annotation referencing a generated file in response content."""
+
file_id: str
index: int
@@ -262,6 +266,8 @@ class InputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageCont
class InputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutput(
BaseModel
):
+ """Text content within an output message of an OpenAI response."""
+
text: str
annotations: Optional[
@@ -615,6 +621,8 @@ class OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageCon
class OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationContainerFileCitation(
BaseModel
):
+ """Container file citation annotation referencing a file within a container."""
+
container_id: str
end_index: int
@@ -631,6 +639,8 @@ class OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageCon
class OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationFilePath(
BaseModel
):
+ """File path annotation referencing a generated file in response content."""
+
file_id: str
index: int
@@ -693,6 +703,8 @@ class OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageCon
class OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutput(
BaseModel
):
+ """Text content within an output message of an OpenAI response."""
+
text: str
annotations: Optional[
@@ -1259,7 +1271,7 @@ class ResponseListResponse(BaseModel):
"""Text response configuration for OpenAI responses."""
tool_choice: Optional[ToolChoice] = None
- """Constrains the tools available to the model to a pre-defined set."""
+ """Enumeration of simple tool choice modes for response generation."""
tools: Optional[List[Tool]] = None
diff --git a/src/llama_stack_client/types/response_object.py b/src/llama_stack_client/types/response_object.py
index 0088d363..3cc8e89e 100644
--- a/src/llama_stack_client/types/response_object.py
+++ b/src/llama_stack_client/types/response_object.py
@@ -154,6 +154,8 @@ class OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageCon
class OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationContainerFileCitation(
BaseModel
):
+ """Container file citation annotation referencing a file within a container."""
+
container_id: str
end_index: int
@@ -170,6 +172,8 @@ class OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageCon
class OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationFilePath(
BaseModel
):
+ """File path annotation referencing a generated file in response content."""
+
file_id: str
index: int
@@ -232,6 +236,8 @@ class OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageCon
class OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutput(
BaseModel
):
+ """Text content within an output message of an OpenAI response."""
+
text: str
annotations: Optional[
@@ -804,7 +810,7 @@ def output_text(self) -> str:
"""Text response configuration for OpenAI responses."""
tool_choice: Optional[ToolChoice] = None
- """Constrains the tools available to the model to a pre-defined set."""
+ """Enumeration of simple tool choice modes for response generation."""
tools: Optional[List[Tool]] = None
diff --git a/src/llama_stack_client/types/response_object_stream.py b/src/llama_stack_client/types/response_object_stream.py
index dc3e3e79..ed4a021a 100644
--- a/src/llama_stack_client/types/response_object_stream.py
+++ b/src/llama_stack_client/types/response_object_stream.py
@@ -238,6 +238,8 @@ class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessage
class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationContainerFileCitation(
BaseModel
):
+ """Container file citation annotation referencing a file within a container."""
+
container_id: str
end_index: int
@@ -254,6 +256,8 @@ class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessage
class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationFilePath(
BaseModel
):
+ """File path annotation referencing a generated file in response content."""
+
file_id: str
index: int
@@ -316,6 +320,8 @@ class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessage
class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputText(
BaseModel
):
+ """Text content within an output message of an OpenAI response."""
+
text: str
annotations: Optional[
@@ -607,6 +613,8 @@ class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageC
class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationContainerFileCitation(
BaseModel
):
+ """Container file citation annotation referencing a file within a container."""
+
container_id: str
end_index: int
@@ -623,6 +631,8 @@ class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageC
class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationFilePath(
BaseModel
):
+ """File path annotation referencing a generated file in response content."""
+
file_id: str
index: int
@@ -685,6 +695,8 @@ class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageC
class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputText(
BaseModel
):
+ """Text content within an output message of an OpenAI response."""
+
text: str
annotations: Optional[
@@ -1001,6 +1013,8 @@ class OpenAIResponseObjectStreamResponseWebSearchCallInProgress(BaseModel):
class OpenAIResponseObjectStreamResponseWebSearchCallSearching(BaseModel):
+ """Streaming event for web search calls currently searching."""
+
item_id: str
output_index: int
@@ -1023,24 +1037,32 @@ class OpenAIResponseObjectStreamResponseWebSearchCallCompleted(BaseModel):
class OpenAIResponseObjectStreamResponseMcpListToolsInProgress(BaseModel):
+ """Streaming event for MCP list tools operation in progress."""
+
sequence_number: int
type: Optional[Literal["response.mcp_list_tools.in_progress"]] = None
class OpenAIResponseObjectStreamResponseMcpListToolsFailed(BaseModel):
+ """Streaming event for a failed MCP list tools operation."""
+
sequence_number: int
type: Optional[Literal["response.mcp_list_tools.failed"]] = None
class OpenAIResponseObjectStreamResponseMcpListToolsCompleted(BaseModel):
+ """Streaming event for a completed MCP list tools operation."""
+
sequence_number: int
type: Optional[Literal["response.mcp_list_tools.completed"]] = None
class OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta(BaseModel):
+ """Streaming event for incremental MCP call argument updates."""
+
delta: str
item_id: str
@@ -1053,6 +1075,8 @@ class OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta(BaseModel):
class OpenAIResponseObjectStreamResponseMcpCallArgumentsDone(BaseModel):
+ """Streaming event for completed MCP call arguments."""
+
arguments: str
item_id: str
@@ -1125,6 +1149,8 @@ class OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseConten
class OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationContainerFileCitation(
BaseModel
):
+ """Container file citation annotation referencing a file within a container."""
+
container_id: str
end_index: int
@@ -1141,6 +1167,8 @@ class OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseConten
class OpenAIResponseObjectStreamResponseContentPartAddedPartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationFilePath(
BaseModel
):
+ """File path annotation referencing a generated file in response content."""
+
file_id: str
index: int
@@ -1290,6 +1318,8 @@ class OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContent
class OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationContainerFileCitation(
BaseModel
):
+ """Container file citation annotation referencing a file within a container."""
+
container_id: str
end_index: int
@@ -1306,6 +1336,8 @@ class OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContent
class OpenAIResponseObjectStreamResponseContentPartDonePartOpenAIResponseContentPartOutputTextAnnotationOpenAIResponseAnnotationFilePath(
BaseModel
):
+ """File path annotation referencing a generated file in response content."""
+
file_id: str
index: int
@@ -1599,6 +1631,8 @@ class OpenAIResponseObjectStreamResponseOutputTextAnnotationAddedAnnotationOpenA
class OpenAIResponseObjectStreamResponseOutputTextAnnotationAddedAnnotationOpenAIResponseAnnotationContainerFileCitation(
BaseModel
):
+ """Container file citation annotation referencing a file within a container."""
+
container_id: str
end_index: int
@@ -1613,6 +1647,8 @@ class OpenAIResponseObjectStreamResponseOutputTextAnnotationAddedAnnotationOpenA
class OpenAIResponseObjectStreamResponseOutputTextAnnotationAddedAnnotationOpenAIResponseAnnotationFilePath(BaseModel):
+ """File path annotation referencing a generated file in response content."""
+
file_id: str
index: int
diff --git a/src/llama_stack_client/types/responses/input_item_list_response.py b/src/llama_stack_client/types/responses/input_item_list_response.py
index d97fa944..242962a7 100644
--- a/src/llama_stack_client/types/responses/input_item_list_response.py
+++ b/src/llama_stack_client/types/responses/input_item_list_response.py
@@ -130,6 +130,8 @@ class DataOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageConte
class DataOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationContainerFileCitation(
BaseModel
):
+ """Container file citation annotation referencing a file within a container."""
+
container_id: str
end_index: int
@@ -146,6 +148,8 @@ class DataOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageConte
class DataOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationFilePath(
BaseModel
):
+ """File path annotation referencing a generated file in response content."""
+
file_id: str
index: int
@@ -208,6 +212,8 @@ class DataOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageConte
class DataOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutput(
BaseModel
):
+ """Text content within an output message of an OpenAI response."""
+
text: str
annotations: Optional[
diff --git a/src/llama_stack_client/types/safety_run_shield_params.py b/src/llama_stack_client/types/safety_run_shield_params.py
index 70b52b84..f38353c9 100644
--- a/src/llama_stack_client/types/safety_run_shield_params.py
+++ b/src/llama_stack_client/types/safety_run_shield_params.py
@@ -98,6 +98,8 @@ class MessageOpenAIUserMessageParamInputContentListOpenAIChatCompletionContentPa
class MessageOpenAIUserMessageParamInputContentListOpenAIChatCompletionContentPartTextParamOpenAIChatCompletionContentPartImageParamOpenAIFileOpenAIFile(
TypedDict, total=False
):
+ """File content part for OpenAI-compatible chat completion messages."""
+
file: Required[
MessageOpenAIUserMessageParamInputContentListOpenAIChatCompletionContentPartTextParamOpenAIChatCompletionContentPartImageParamOpenAIFileOpenAIFileFile
]
From 00c6ed372934d16541c29eaff049bb86b3520d1c Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Sat, 28 Mar 2026 00:16:43 +0000
Subject: [PATCH 26/26] release: 0.7.0-alpha.1
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 48 ++++++++++++++++++++++++++++++
pyproject.toml | 2 +-
src/llama_stack_client/_version.py | 2 +-
4 files changed, 51 insertions(+), 3 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 463e3de0..cd920613 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.6.1-alpha.1"
+ ".": "0.7.0-alpha.1"
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e38b7723..a7365909 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,53 @@
# Changelog
+## 0.7.0-alpha.1 (2026-03-28)
+
+Full Changelog: [v0.6.1-alpha.1...v0.7.0-alpha.1](https://github.com/llamastack/llama-stack-client-python/compare/v0.6.1-alpha.1...v0.7.0-alpha.1)
+
+### ⚠ BREAKING CHANGES
+
+* eliminate GET /chat/completions/{completion_id} conformance issues
+* rename agents API to responses API
+* eliminate /files/{file_id} GET differences
+
+### Features
+
+* Add stream_options parameter support ([b4c2f15](https://github.com/llamastack/llama-stack-client-python/commit/b4c2f15b16872730a9c254b1b2dfc02aba223a71))
+* eliminate /files/{file_id} GET differences ([1f28d73](https://github.com/llamastack/llama-stack-client-python/commit/1f28d730824b6cb721415985194c5f4567e42ea7))
+* eliminate GET /chat/completions/{completion_id} conformance issues ([dad9f54](https://github.com/llamastack/llama-stack-client-python/commit/dad9f546400133d34a0cd650a227800be78b0d1f))
+* **internal:** implement indices array format for query and form serialization ([6694121](https://github.com/llamastack/llama-stack-client-python/commit/6694121eee689fb7033704bad2b698a4640e2431))
+* **responses:** add cancel endpoint for background responses ([d9bc91a](https://github.com/llamastack/llama-stack-client-python/commit/d9bc91afecb64ec27b97d37699d5ff6c1222d369))
+
+
+### Bug Fixes
+
+* **deps:** bump minimum typing-extensions version ([50ea4d7](https://github.com/llamastack/llama-stack-client-python/commit/50ea4d7fd98a86726f6825d911507b7fc96e2e60))
+* **inference:** improve chat completions OpenAI conformance ([147b88b](https://github.com/llamastack/llama-stack-client-python/commit/147b88b44eb83bceb7cd6204cd79d8dafe8f8e7a))
+* **pydantic:** do not pass `by_alias` unless set ([f6836f9](https://github.com/llamastack/llama-stack-client-python/commit/f6836f9dacef1b9b26774fcfaf82689ae00f374a))
+* remove duplicate dataset_id parameter in append-rows endpoint ([d6a79d0](https://github.com/llamastack/llama-stack-client-python/commit/d6a79d0a830bad4e82b70d7ab9e007ebc16e0f05))
+* sanitize endpoint path params ([9b288d5](https://github.com/llamastack/llama-stack-client-python/commit/9b288d553ae83860fbe1d8ee9352532ed04ddd9b))
+
+
+### Chores
+
+* **ci:** skip lint on metadata-only changes ([b096c2c](https://github.com/llamastack/llama-stack-client-python/commit/b096c2ce513a5d2de9a17e7841609feb30d1b0b2))
+* **internal:** tweak CI branches ([1df7e26](https://github.com/llamastack/llama-stack-client-python/commit/1df7e2605e78572eccc53aa8db1e44d987106a9b))
+* **internal:** update gitignore ([0e98cfd](https://github.com/llamastack/llama-stack-client-python/commit/0e98cfdcf7779ca24ef4dbd7e9e8d9c75fa2a751))
+* **internal:** version bump ([f468096](https://github.com/llamastack/llama-stack-client-python/commit/f46809696ddf1f179cc26984facfcbb7f9264730))
+* **tests:** bump steady to v0.19.4 ([f5ad8f8](https://github.com/llamastack/llama-stack-client-python/commit/f5ad8f801078d79c03ec7723cd64b1c9895def2d))
+* **tests:** bump steady to v0.19.5 ([55689e1](https://github.com/llamastack/llama-stack-client-python/commit/55689e1ddee55d81efff681dbb3523b0ed09d658))
+* **tests:** bump steady to v0.19.6 ([87cb87e](https://github.com/llamastack/llama-stack-client-python/commit/87cb87e8ecd52d95b5a375f8b4c00f5837e4feeb))
+* **tests:** bump steady to v0.19.7 ([10f6ed7](https://github.com/llamastack/llama-stack-client-python/commit/10f6ed745b38d89be2d6a5eb007427b015e84e23))
+
+
+### Refactors
+
+* remove fine_tuning API ([021bd5e](https://github.com/llamastack/llama-stack-client-python/commit/021bd5e6138574884befe6f20ba86ceeefee1767))
+* remove tool_groups from public API and auto-register from provider specs ([c0df2dc](https://github.com/llamastack/llama-stack-client-python/commit/c0df2dcf9bb38600f73db746dc38d3277e74e7b9))
+* rename agents API to responses API ([f5c27db](https://github.com/llamastack/llama-stack-client-python/commit/f5c27db9d2716098a116d516cc5ad673ee621988))
+* rename rag-runtime provider to file-search ([94a14da](https://github.com/llamastack/llama-stack-client-python/commit/94a14dad88ed55d3f2baf1de8eb30ba529fb9818))
+* **tests:** switch from prism to steady ([23d591c](https://github.com/llamastack/llama-stack-client-python/commit/23d591c70549c7f00b7be136a19893dbdd65f43c))
+
## 0.6.1-alpha.1 (2026-03-13)
Full Changelog: [v0.5.0-alpha.2...v0.6.1-alpha.1](https://github.com/llamastack/llama-stack-client-python/compare/v0.5.0-alpha.2...v0.6.1-alpha.1)
diff --git a/pyproject.toml b/pyproject.toml
index 1fd52862..7036b973 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "llama_stack_client"
-version = "0.6.1-alpha.1"
+version = "0.7.0-alpha.1"
description = "The official Python library for the llama-stack-client API"
dynamic = ["readme"]
license = "MIT"
diff --git a/src/llama_stack_client/_version.py b/src/llama_stack_client/_version.py
index b7f019af..eba92666 100644
--- a/src/llama_stack_client/_version.py
+++ b/src/llama_stack_client/_version.py
@@ -7,4 +7,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "llama_stack_client"
-__version__ = "0.6.1-alpha.1" # x-release-please-version
+__version__ = "0.7.0-alpha.1" # x-release-please-version