Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,8 @@ dependencies = [
"dotenv",
"pydantic>=2.10.6,<3.0.0",
"httpx>=0.28.1,<0.29.0",
"validators>=0.34.0,<0.35.0"
"validators>=0.34.0,<0.35.0",
"opik"
]

license = "MIT"
Expand Down
3 changes: 3 additions & 0 deletions src/ai_sdk/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,8 @@
from .core.generate_text import generate_text
from .core.generate_object import generate_object
from .core.utils import is_opik_configured

is_opik_configured()

__all__ = ["generate_text", "generate_object"]

Expand Down
13 changes: 13 additions & 0 deletions src/ai_sdk/anthropic/chat_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,9 @@
import datetime
import uuid
from typing import Tuple
import opik
from opik import opik_context

SUPPORTED_MODELS = [
"claude-3-7-sonnet-20250219",
"claude-3-5-sonnet-20241022",
Expand Down Expand Up @@ -197,6 +200,7 @@ def _convert_messages(self, messages: List[Message]) -> Tuple[List[Dict[str, Any

return res, system

@opik.track
def _get_args(self, options: LanguageModelCallOptions):
warnings = []

Expand Down Expand Up @@ -304,6 +308,7 @@ def supports_tool_calls(self) -> bool:
return True
return False

@opik.track(type="llm")
def do_generate(self, options: LanguageModelCallOptions) -> LanguageModelCallResult:
args, warnings = self._get_args(options)

Expand All @@ -326,6 +331,14 @@ def do_generate(self, options: LanguageModelCallOptions) -> LanguageModelCallRes
response_body = result,
is_retryable = self._is_retryable(response.status_code)
)

# Log the usage
opik_context.update_current_span(
usage={
"prompt_tokens": result["usage"]["prompt_tokens"],
"completion_tokens": result["usage"]["completion_tokens"]
}
)

return LanguageModelCallResult(
text = result["content"][0]["text"] if result["content"][0]["type"] == "text" else "",
Expand Down
2 changes: 2 additions & 0 deletions src/ai_sdk/core/convert_response.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
from typing import List, Optional, Dict, Any, Callable
from .types import AssistantMessage, ResponseMessage, TextPart, ToolCallPart, ToolResultPart, ToolMessage
import opik

@opik.track
def convert_to_response_messages(
text: Optional[str] = "",
tools: Dict[str, Any] = None,
Expand Down
4 changes: 4 additions & 0 deletions src/ai_sdk/core/generate_object.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,9 @@
from pydantic import BaseModel
from .types import Tool, ObjectResult
import json
import opik

@opik.track
def _parse_responses(object_generation_mode: str, res: LanguageModelCallResult, schema: BaseModel) -> BaseModel:
if object_generation_mode == "json" or object_generation_mode == "text":
if res.text:
Expand Down Expand Up @@ -59,6 +61,7 @@ def _parse_responses(object_generation_mode: str, res: LanguageModelCallResult,

return object

@opik.track
def _inject_json_schema(prompt: Optional[str], schema: BaseModel) -> str:
DEFAULT_SCHEMA_PREFIX = 'JSON schema:'
DEFAULT_SCHEMA_SUFFIX = "You MUST answer with a JSON object that matches the JSON schema above. Do not include any other text, only the JSON object and DO NOT return the data in markdown format."
Expand All @@ -74,6 +77,7 @@ def _inject_json_schema(prompt: Optional[str], schema: BaseModel) -> str:
# Filter out None values and join with newlines
return '\n'.join(line for line in components if line is not None)

@opik.track
def generate_object(
model: LanguageModel,
schema: BaseModel,
Expand Down
2 changes: 2 additions & 0 deletions src/ai_sdk/core/generate_text.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,9 @@
from .errors import AI_APICallError
from .convert_response import convert_to_response_messages
import time
import opik

@opik.track
def generate_text(
model: LanguageModel,
system: Optional[str] = None,
Expand Down
2 changes: 2 additions & 0 deletions src/ai_sdk/core/tool_calls.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,9 @@
from .types import ToolCallPart, ToolResultPart, Tool
from .errors import AI_ToolExecutionError
import json
import opik

@opik.track
def execute_tool_calls(
tool_calls: List[ToolCallPart],
tools: Dict[str, Tool]
Expand Down
25 changes: 23 additions & 2 deletions src/ai_sdk/core/utils.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,12 @@
from typing import List, Optional
from typing import List, Optional, Tuple
from .types import Message, SystemMessage, UserMessage
import os
import opik
import logging

LOGGER = logging.getLogger(__name__)

@opik.track
def standardize_messages(
system: Optional[str],
prompt: Optional[str],
Expand Down Expand Up @@ -36,4 +41,20 @@ def load_api_key(
if api_key is None:
raise ValueError(f"{description} API key is missing. Pass it using the '{api_key_parameter_name}' parameter or the {env_var_name} environment variable.")

return api_key
return api_key

def is_opik_configured() -> bool:
    """Return True when the ``opik`` package is importable and authenticated.

    On any failure (package missing, client construction error, failed
    auth check) the ``OPIK_TRACK_DISABLE`` environment variable is set to
    ``"true"`` so that ``@opik.track`` decorators elsewhere in the package
    become no-ops instead of raising at call time, and False is returned.

    Returns:
        bool: True if Opik tracing is available and configured, else False.
    """
    try:
        import opik
    except ImportError:
        # Opik not installed at all: disable tracking globally.
        os.environ["OPIK_TRACK_DISABLE"] = "true"
        return False

    try:
        # Client construction can itself raise on bad configuration, so it
        # must live inside the try; otherwise a misconfigured environment
        # would crash package import (this runs from __init__.py).
        _client = opik.Opik(_show_misconfiguration_message=False)
        _client.auth_check()
        return True
    except Exception:  # noqa: BLE001
        LOGGER.warning("Opik is not configured, run `opik configure` in your terminal or call `opik.configure()` in your code.")
        # Keep behavior consistent with the ImportError branch: make the
        # @opik.track decorators no-ops when auth is not set up.
        os.environ["OPIK_TRACK_DISABLE"] = "true"
        return False
18 changes: 16 additions & 2 deletions src/ai_sdk/openai/chat_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,8 @@
import json
import datetime
import validators

import opik
from opik import opik_context

class OpenAIChatSettings(BaseModel):
logit_bias: Optional[Dict[float, float]] = None
Expand Down Expand Up @@ -67,7 +68,7 @@ def __init__(self, model_id: str, settings: OpenAIChatSettings, config: OpenAICh
if model_id not in SUPPORTED_MODELS:
raise AI_UnsupportedFunctionalityError(
functionality="Model",
reason=f"This model is not supported: {model_id}"
message=f"This model is not supported: {model_id}"
)
self.default_object_generation_mode = "json"
self.settings = settings
Expand Down Expand Up @@ -174,6 +175,7 @@ def supports_tool_calls(self) -> bool:
return True
return False

@opik.track
def _convert_tool_calls_to_openai_format(self, tool_calls: list[ToolCallPart]) -> list[Dict[str, Any]]:
"""
Converts internal ToolCallPart format to OpenAI's tool_calls format.
Expand Down Expand Up @@ -204,6 +206,7 @@ def _convert_tool_calls_to_openai_format(self, tool_calls: list[ToolCallPart]) -

return openai_tool_calls

@opik.track
def _convert_messages(self, messages: List[Message]) -> List[Dict[str, Any]]:
res = []

Expand Down Expand Up @@ -278,6 +281,7 @@ def _convert_messages(self, messages: List[Message]) -> List[Dict[str, Any]]:
})
return res

@opik.track
def _parse_tool_calls(self, result: Any) -> List[ToolCallPart]:
tool_calls = []

Expand All @@ -295,6 +299,8 @@ def _parse_tool_calls(self, result: Any) -> List[ToolCallPart]:
))
return tool_calls


@opik.track(type="llm")
def do_generate(self, options: LanguageModelCallOptions) -> LanguageModelCallResult:
args, warnings = self._get_args(options)

Expand All @@ -317,6 +323,14 @@ def do_generate(self, options: LanguageModelCallOptions) -> LanguageModelCallRes
is_retryable = self._is_retryable(response.status_code)
)

# Log the usage
opik_context.update_current_span(
usage={
"prompt_tokens": result["usage"]["prompt_tokens"],
"completion_tokens": result["usage"]["completion_tokens"]
}
)

return LanguageModelCallResult(
text = result["choices"][0]["message"]["content"],
finish_reason = self._convert_finish_reason(result["choices"][0]["finish_reason"]),
Expand Down
Loading
Loading