diff --git a/examples/openai/README.md b/examples/openai/README.md
index 3208d12..fe2d39c 100644
--- a/examples/openai/README.md
+++ b/examples/openai/README.md
@@ -24,20 +24,26 @@ This directory contains examples of using OpenAI's API with environmental impact
 ```bash
 # Basic chat
-python openai-chat.py --model "gpt-4" --message "What is artificial intelligence?" --max-tokens 100
+uv run python -m examples.openai.openai-chat --model "gpt-4" --message "What is artificial intelligence?"
 # Async streaming chat
-python openai-async-stream-chat.py --model "gpt-4" --message "Explain quantum computing" --stream
+uv run python -m examples.openai.openai-async-stream-chat --model "gpt-4" --message "Explain quantum computing"
 # Generate an image
-python openai-image.py --prompt "A beautiful sunset over mountains" --size "1024x1024"
+uv run python -m examples.openai.openai-image --prompt "A beautiful sunset over mountains" --model "dall-e-2" --size "1024x1024"
 # Async image generation
-python openai-async-image.py --prompt "A futuristic city" --model "dall-e-3"
+uv run python -m examples.openai.openai-async-image --prompt "A futuristic city" --model "dall-e-2" --size "1024x1024"
 # Text to Speech
-python openai-speech.py --text "Hello, welcome to the future!" --voice "alloy" --output speech.mp3
+uv run python -m examples.openai.openai-speech --text "Hello, welcome to the future!" --model "tts-1" --response-format "mp3"
+
+# Async Text to Speech
+uv run python -m examples.openai.openai-async-speech --text "Hello, welcome to the future!" --model "tts-1" --response-format "mp3"
 # Audio Transcription
-python openai-transcription.py --file "recording.mp3" --model "whisper-1"
+uv run python -m examples.openai.openai-transcription --filename "recording.mp3" --model "whisper-1"
+
+# Async Audio Transcription
+uv run python -m examples.openai.openai-async-transcription --filename "recording.mp3" --model "whisper-1"
 ```
\ No newline at end of file
diff --git a/examples/openai/openai-async-speech.py b/examples/openai/openai-async-speech.py
new file mode 100644
index 0000000..c3fc537
--- /dev/null
+++ b/examples/openai/openai-async-speech.py
@@ -0,0 +1,36 @@
+import asyncio
+
+from openai import AsyncOpenAI
+from scope3ai import Scope3AI
+
+
+async def main(text: str, model: str, response_format: str):
+    client = AsyncOpenAI()
+    scope3 = Scope3AI.init()
+
+    with scope3.trace() as tracer:
+        response = await client.audio.speech.create(
+            model=model,
+            voice="alloy",
+            input=text,
+            response_format=response_format,
+        )
+        print(response)
+        impact = tracer.impact()
+        print(impact)
+        print(f"Total Energy Wh: {impact.total_energy_wh}")
+        print(f"Total GCO2e: {impact.total_gco2e}")
+        print(f"Total MLH2O: {impact.total_mlh2o}")
+
+
+if __name__ == "__main__":
+    import argparse
+
+    parser = argparse.ArgumentParser(description="OpenAI Text to Speech")
+    parser.add_argument("--model", type=str, default="tts-1", help="Model")
+    parser.add_argument(
+        "--response-format", type=str, default="mp3", help="Response format"
+    )
+    parser.add_argument("--text", type=str, help="The text to convert to speech")
+    args = parser.parse_args()
+    asyncio.run(main(**vars(args)))
diff --git a/examples/openai/openai-async-stream-chat.py b/examples/openai/openai-async-stream-chat.py
index 31ef51f..bddd78d 100644
--- a/examples/openai/openai-async-stream-chat.py
+++ b/examples/openai/openai-async-stream-chat.py
@@ -5,26 +5,24 @@
 from scope3ai import Scope3AI
-async def main(model: str, message: str, role: str, stream: bool):
+async def main(model: str, message: str, role: str):
     client = AsyncOpenAI()
     scope3 = Scope3AI.init()
     with scope3.trace() as tracer:
+        chunk_count = 0
         response = await client.chat.completions.create(
             model=model,
             messages=[{"role": role, "content": message}],
-            stream=stream,
+            stream=True,
         )
-
-        if stream:
-            async for event in response:
-                if not event.choices:
-                    continue
-                print(event.choices[0].delta.content, end="", flush=True)
-            print()
-        else:
-            print(response.choices[0].message.content.strip())
-
+        async for event in response:
+            chunk_count += 1
+            if not event.choices:
+                continue
+            print(event.choices[0].delta.content, end="", flush=True)
+        print()
+        print(f"Chunk count: {chunk_count}")
         impact = tracer.impact()
         print(impact)
         print(f"Total Energy Wh: {impact.total_energy_wh}")
@@ -56,12 +54,5 @@ async def main(model: str, message: str, role: str, stream: bool):
         default="user",
         help="Role for the message (user, system, or assistant)",
     )
-    parser.add_argument(
-        "--no-stream",
-        action="store_false",
-        dest="stream",
-        help="Disable streaming mode",
-        default=True,
-    )
     args = parser.parse_args()
     asyncio.run(main(**vars(args)))
diff --git a/examples/openai/openai-async-transcription.py b/examples/openai/openai-async-transcription.py
new file mode 100644
index 0000000..ef36326
--- /dev/null
+++ b/examples/openai/openai-async-transcription.py
@@ -0,0 +1,36 @@
+from pathlib import Path
+import asyncio
+
+from openai import AsyncOpenAI
+from scope3ai import Scope3AI
+
+
+async def main(filename: Path, model: str, response_format: str):
+    client = AsyncOpenAI()
+    scope3 = Scope3AI.init()
+
+    with scope3.trace() as tracer:
+        response = await client.audio.transcriptions.create(
+            model=model,
+            file=filename,
+            response_format=response_format,
+        )
+        print(response)
+        impact = tracer.impact()
+        print(impact)
+        print(f"Total Energy Wh: {impact.total_energy_wh}")
+        print(f"Total GCO2e: {impact.total_gco2e}")
+        print(f"Total MLH2O: {impact.total_mlh2o}")
+
+
+if __name__ == "__main__":
+    import argparse
+
+    parser = argparse.ArgumentParser(description="OpenAI Speech to Text")
+    parser.add_argument("--model", type=str, default="whisper-1", help="Model")
+    parser.add_argument(
+        "--response_format", type=str, default="json", help="Response format"
+    )
+    parser.add_argument("--filename", type=Path, help="The path to the input file")
+    args = parser.parse_args()
+    asyncio.run(main(**vars(args)))
diff --git a/examples/openai/openai-speech.py b/examples/openai/openai-speech.py
new file mode 100644
index 0000000..5eb3f81
--- /dev/null
+++ b/examples/openai/openai-speech.py
@@ -0,0 +1,35 @@
+from openai import OpenAI
+from scope3ai import Scope3AI
+
+
+def main(text: str, model: str, response_format: str):
+    client = OpenAI()
+    scope3 = Scope3AI.init()
+
+    with scope3.trace() as tracer:
+        response = client.audio.speech.create(
+            model=model,
+            voice="alloy",
+            input=text,
+            response_format=response_format,
+        )
+        print(response)
+        print(response.scope3ai.request)
+        impact = tracer.impact()
+        print(impact)
+        print(f"Total Energy Wh: {impact.total_energy_wh}")
+        print(f"Total GCO2e: {impact.total_gco2e}")
+        print(f"Total MLH2O: {impact.total_mlh2o}")
+
+
+if __name__ == "__main__":
+    import argparse
+
+    parser = argparse.ArgumentParser(description="OpenAI Text to Speech")
+    parser.add_argument("--model", type=str, default="tts-1", help="Model")
+    parser.add_argument(
+        "--response-format", type=str, default="mp3", help="Response format"
+    )
+    parser.add_argument("--text", type=str, help="The text to convert to speech")
+    args = parser.parse_args()
+    main(**vars(args))
diff --git a/examples/openai/openai-stt.py b/examples/openai/openai-transcription.py
similarity index 92%
rename from examples/openai/openai-stt.py
rename to examples/openai/openai-transcription.py
index 3fb1248..50df34f 100644
--- a/examples/openai/openai-stt.py
+++ b/examples/openai/openai-transcription.py
@@ -31,6 +31,6 @@ def main(filename: Path, model: str, response_format: str):
     parser.add_argument(
         "--response_format", type=str, default="json", help="Response format"
     )
-    parser.add_argument("filename", type=Path, help="The path to the input file")
+    parser.add_argument("--filename", type=Path, help="The path to the input file")
     args = parser.parse_args()
     main(**vars(args))
diff --git a/scope3ai/lib.py b/scope3ai/lib.py
index 35dc4a0..8f75edd 100644
--- a/scope3ai/lib.py
+++ b/scope3ai/lib.py
@@ -436,6 +436,13 @@ def set_only_if(row, field, *values):
         )
         # copy tracer or global metadata
+
+        set_only_if(
+            row,
+            "managed_service_id",
+            row.managed_service_id if row.managed_service_id else "",
+        )
+
         set_only_if(
             row,
             "client_id",
diff --git a/scope3ai/tracers/anthropic/chat.py b/scope3ai/tracers/anthropic/chat.py
index 426ff99..8a201c1 100644
--- a/scope3ai/tracers/anthropic/chat.py
+++ b/scope3ai/tracers/anthropic/chat.py
@@ -18,10 +18,8 @@
 from typing_extensions import override
 from scope3ai.api.types import Scope3AIContext, ImpactRow
-from scope3ai.constants import PROVIDERS
 from scope3ai.lib import Scope3AI
-PROVIDER = PROVIDERS.ANTROPIC.value
 MessageStreamT = TypeVar("MessageStreamT", bound=_MessageStream)
 AsyncMessageStreamT = TypeVar("AsyncMessageStreamT", bound=_AsyncMessageStream)
@@ -63,7 +61,6 @@ def __stream_text__(self) -> Iterator[str]:  # type: ignore[misc]
             input_tokens=input_tokens,
             output_tokens=output_tokens,
             request_duration_ms=requests_latency * 1000,
-            managed_service_id=PROVIDER,
         )
         self.scope3ai = Scope3AI.get_instance().submit_impact(scope3_row)
@@ -106,7 +103,6 @@ async def __stream_text__(self) -> AsyncIterator[str]:  # type: ignore[misc]
             input_tokens=input_tokens,
             output_tokens=output_tokens,
             request_duration_ms=requests_latency * 1000,
-            managed_service_id=PROVIDER,
         )
         self.scope3ai = await Scope3AI.get_instance().asubmit_impact(scope3_row)
@@ -177,7 +173,6 @@ def __stream__(self) -> Iterator[_T]:
             input_tokens=input_tokens,
             output_tokens=output_tokens,
             request_duration_ms=request_latency * 1000,
-            managed_service_id=PROVIDER,
         )
         self.scope3ai = Scope3AI.get_instance().submit_impact(scope3_row)
@@ -210,7 +205,6 @@ async def __stream__(self) -> AsyncIterator[_T]:
             input_tokens=input_tokens,
             output_tokens=output_tokens,
             request_duration_ms=request_latency * 1000,
-            managed_service_id=PROVIDER,
         )
         self.scope3ai = await Scope3AI.get_instance().asubmit_impact(scope3_row)
@@ -229,7 +223,6 @@ def _anthropic_chat_wrapper(response: Message, request_latency: float) -> Messag
         input_tokens=response.usage.input_tokens,
         output_tokens=response.usage.output_tokens,
         request_duration_ms=request_latency * 1000,
-        managed_service_id=PROVIDER,
     )
     scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
     if scope3ai_ctx is not None:
@@ -263,7 +256,6 @@ async def _anthropic_async_chat_wrapper(
         input_tokens=response.usage.input_tokens,
         output_tokens=response.usage.output_tokens,
         request_duration_ms=request_latency * 1000,
-        managed_service_id=PROVIDER,
     )
     scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
     if scope3ai_ctx is not None:
diff --git a/scope3ai/tracers/cohere/chat.py b/scope3ai/tracers/cohere/chat.py
index 6e3e9c8..473f93d 100644
--- a/scope3ai/tracers/cohere/chat.py
+++ b/scope3ai/tracers/cohere/chat.py
@@ -11,12 +11,9 @@
     StreamEndStreamedChatResponse as _StreamEndStreamedChatResponse,
 )
-from scope3ai.constants import PROVIDERS
 from scope3ai.lib import Scope3AI
 from scope3ai.api.types import Scope3AIContext, ImpactRow
-PROVIDER = PROVIDERS.COHERE.value
-
 class NonStreamedChatResponse(_NonStreamedChatResponse):
     scope3ai: Optional[Scope3AIContext] = None
@@ -47,7 +44,6 @@ def cohere_chat_wrapper(
         input_tokens=response.meta.tokens.input_tokens,
         output_tokens=response.meta.tokens.output_tokens,
         request_duration_ms=request_latency * 1000,
-        managed_service_id=PROVIDER,
     )
     scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
     return NonStreamedChatResponse(**response.dict(), scope3ai=scope3ai_ctx)
@@ -68,7 +64,6 @@ async def cohere_async_chat_wrapper(
         input_tokens=response.meta.tokens.input_tokens,
         output_tokens=response.meta.tokens.output_tokens,
         request_duration_ms=request_latency * 1000,
-        managed_service_id=PROVIDER,
     )
     scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
     return NonStreamedChatResponse(**response.dict(), scope3ai=scope3ai_ctx)
@@ -93,7 +88,6 @@ def cohere_stream_chat_wrapper(
             input_tokens=input_tokens,
             output_tokens=output_tokens,
             request_duration_ms=request_latency * 1000,
-            managed_service_id=PROVIDER,
         )
         scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
         yield StreamEndStreamedChatResponse(**event.dict(), scope3ai=scope3ai_ctx)
@@ -120,7 +114,6 @@ async def cohere_async_stream_chat_wrapper(
             input_tokens=input_tokens,
             output_tokens=output_tokens,
             request_duration_ms=request_latency * 1000,
-            managed_service_id=PROVIDER,
         )
         scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
         yield StreamEndStreamedChatResponse(**event.dict(), scope3ai=scope3ai_ctx)
diff --git a/scope3ai/tracers/cohere/chat_v2.py b/scope3ai/tracers/cohere/chat_v2.py
index b7950c8..e757db3 100644
--- a/scope3ai/tracers/cohere/chat_v2.py
+++ b/scope3ai/tracers/cohere/chat_v2.py
@@ -11,11 +11,8 @@
 )
 from scope3ai.api.types import ImpactRow, Scope3AIContext
-from scope3ai.constants import PROVIDERS
 from scope3ai.lib import Scope3AI
-PROVIDER = PROVIDERS.COHERE.value
-
 class ChatResponse(_ChatResponse):
     scope3ai: Optional[Scope3AIContext] = None
@@ -50,7 +47,6 @@ def cohere_chat_v2_wrapper(
         input_tokens=response.usage.tokens.input_tokens,
         output_tokens=response.usage.tokens.output_tokens,
         request_duration_ms=request_latency * 1000,
-        managed_service_id=PROVIDER,
     )
     scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
     return ChatResponse(**response.dict(), scope3ai=scope3ai_ctx)
@@ -71,7 +67,6 @@ async def cohere_async_chat_v2_wrapper(
         input_tokens=response.usage.tokens.input_tokens,
         output_tokens=response.usage.tokens.output_tokens,
         request_duration_ms=request_latency * 1000,
-        managed_service_id=PROVIDER,
     )
     scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
     return ChatResponse(**response.dict(), scope3ai=scope3ai_ctx)
@@ -97,7 +92,6 @@ def cohere_stream_chat_v2_wrapper(
             input_tokens=input_tokens,
             output_tokens=output_tokens,
             request_duration_ms=request_latency * 1000,
-            managed_service_id=PROVIDER,
         )
         scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
         yield Scope3AIStreamedChatResponseV2(type="scope3ai", scope3ai=scope3ai_ctx)
@@ -123,7 +117,6 @@ async def cohere_async_stream_chat_v2_wrapper(
             input_tokens=input_tokens,
             output_tokens=output_tokens,
             request_duration_ms=request_latency * 1000,
-            managed_service_id=PROVIDER,
         )
         scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
         yield Scope3AIStreamedChatResponseV2(type="scope3ai", scope3ai=scope3ai_ctx)
diff --git a/scope3ai/tracers/google_genai/chat.py b/scope3ai/tracers/google_genai/chat.py
index 12c9a45..2a523be 100644
--- a/scope3ai/tracers/google_genai/chat.py
+++ b/scope3ai/tracers/google_genai/chat.py
@@ -5,11 +5,8 @@
 from scope3ai.api.types import Scope3AIContext
 from scope3ai.api.typesgen import ImpactRow
-from scope3ai.constants import PROVIDERS
 from scope3ai.lib import Scope3AI
-PROVIDER = PROVIDERS.GOOGLE_GENAI.value
-
 class GenerateContentResponse(_GenerateContentResponse):
     scope3ai: Optional[Scope3AIContext] = None
@@ -21,7 +18,6 @@ def get_impact_row(response: _GenerateContentResponse, duration_ms: float) -> Im
         input_tokens=response.usage_metadata.prompt_token_count,
         output_tokens=response.usage_metadata.candidates_token_count or 0,
         request_duration_ms=duration_ms * 1000,
-        managed_service_id=PROVIDER,
     )
diff --git a/scope3ai/tracers/huggingface/chat.py b/scope3ai/tracers/huggingface/chat.py
index 3b09d2f..02ea8cf 100644
--- a/scope3ai/tracers/huggingface/chat.py
+++ b/scope3ai/tracers/huggingface/chat.py
@@ -13,11 +13,9 @@
 from requests import Response
 from scope3ai.api.types import ImpactRow, Scope3AIContext
-from scope3ai.constants import PROVIDERS
 from scope3ai.lib import Scope3AI
 from scope3ai.response_interceptor.requests_interceptor import requests_response_capture
-PROVIDER = PROVIDERS.HUGGINGFACE_HUB.value
 HUGGING_FACE_CHAT_TASK = "chat"
@@ -64,7 +62,6 @@ def huggingface_chat_wrapper_non_stream(
         input_tokens=response.usage.prompt_tokens,
         output_tokens=response.usage.completion_tokens,
         request_duration_ms=float(compute_time) * 1000,
-        managed_service_id=PROVIDER,
     )
     scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
     chat = ChatCompletionOutput(**asdict(response))
@@ -90,7 +87,6 @@ def huggingface_chat_wrapper_stream(
             model_id=model,
             output_tokens=token_count,
             request_duration_ms=request_latency * 1000,
-            managed_service_id=PROVIDER,
         )
         chunk_data = ChatCompletionStreamOutput(**asdict(chunk))
         scope3_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
@@ -127,7 +123,6 @@ async def huggingface_async_chat_wrapper_non_stream(
         input_tokens=response.usage.prompt_tokens,
         output_tokens=output_tokens,
         request_duration_ms=request_latency * 1000,
-        managed_service_id=PROVIDER,
     )
     scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
@@ -152,7 +147,6 @@ async def huggingface_async_chat_wrapper_stream(
             output_tokens=token_count,
             request_duration_ms=request_latency * 1000,
             # TODO: can we get the header that has the processing time
-            managed_service_id=PROVIDER,
         )
         scope3_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
         chunk_data = ChatCompletionStreamOutput(**asdict(chunk))
diff --git a/scope3ai/tracers/huggingface/image_to_image.py b/scope3ai/tracers/huggingface/image_to_image.py
index 096a0b9..36adfc7 100644
--- a/scope3ai/tracers/huggingface/image_to_image.py
+++ b/scope3ai/tracers/huggingface/image_to_image.py
@@ -16,12 +16,10 @@
 from scope3ai.api.types import ImpactRow, Scope3AIContext
 from scope3ai.api.typesgen import Image as RootImage
 from scope3ai.api.typesgen import Task
-from scope3ai.constants import PROVIDERS
 from scope3ai.lib import Scope3AI
 from scope3ai.response_interceptor.aiohttp_interceptor import aiohttp_response_capture
 from scope3ai.response_interceptor.requests_interceptor import requests_response_capture
-PROVIDER = PROVIDERS.HUGGINGFACE_HUB.value
 HUGGING_FACE_IMAGE_TO_IMAGE_TASK = "chat"
@@ -64,7 +62,6 @@ def _hugging_face_image_to_image_get_impact_row(
         input_tokens=int(input_tokens),
         task=Task.image_generation,
         request_duration_ms=float(compute_time) * 1000,
-        managed_service_id=PROVIDER,
         output_images=[RootImage(root=f"{output_width}x{output_height}")],
         input_images=input_images,
     )
diff --git a/scope3ai/tracers/huggingface/speech_to_text.py b/scope3ai/tracers/huggingface/speech_to_text.py
index 7e24dad..d211c92 100644
--- a/scope3ai/tracers/huggingface/speech_to_text.py
+++ b/scope3ai/tracers/huggingface/speech_to_text.py
@@ -14,12 +14,10 @@
 from scope3ai.api.types import ImpactRow, Scope3AIContext
 from scope3ai.api.typesgen import Task
-from scope3ai.constants import PROVIDERS
 from scope3ai.lib import Scope3AI
 from scope3ai.response_interceptor.aiohttp_interceptor import aiohttp_response_capture
 from scope3ai.response_interceptor.requests_interceptor import requests_response_capture
-PROVIDER = PROVIDERS.HUGGINGFACE_HUB.value
 HUGGING_FACE_SPEECH_TO_TEXT_TASK = "automatic-speech-recognition"
@@ -50,7 +48,6 @@ def _hugging_face_automatic_recognition_get_impact_row(
         task=Task.text_to_speech,
         input_audio_seconds=float(compute_audio_length),
         request_duration_ms=float(compute_time) * 1000,
-        managed_service_id=PROVIDER,
     )
     result = AutomaticSpeechRecognitionOutput(**asdict(response))
     return result, scope3_row
diff --git a/scope3ai/tracers/huggingface/text_to_image.py b/scope3ai/tracers/huggingface/text_to_image.py
index e0fb7d5..0ead53b 100644
--- a/scope3ai/tracers/huggingface/text_to_image.py
+++ b/scope3ai/tracers/huggingface/text_to_image.py
@@ -14,12 +14,10 @@
 from scope3ai.api.types import ImpactRow, Scope3AIContext
 from scope3ai.api.typesgen import Image as RootImage
 from scope3ai.api.typesgen import Task
-from scope3ai.constants import PROVIDERS
 from scope3ai.lib import Scope3AI
 from scope3ai.response_interceptor.aiohttp_interceptor import aiohttp_response_capture
 from scope3ai.response_interceptor.requests_interceptor import requests_response_capture
-PROVIDER = PROVIDERS.HUGGINGFACE_HUB.value
 HUGGING_FACE_TEXT_TO_IMAGE_TASK = "text-to-image"
@@ -52,7 +50,6 @@ def _hugging_face_text_to_image_get_impact_row(
         task=Task.text_to_image,
         output_images=[RootImage(root=f"{width}x{height}")],
         request_duration_ms=float(compute_time) * 1000,
-        managed_service_id=PROVIDER,
     )
     result = TextToImageOutput(response)
diff --git a/scope3ai/tracers/huggingface/text_to_speech.py b/scope3ai/tracers/huggingface/text_to_speech.py
index ba09544..dc9de60 100644
--- a/scope3ai/tracers/huggingface/text_to_speech.py
+++ b/scope3ai/tracers/huggingface/text_to_speech.py
@@ -13,12 +13,10 @@
 from scope3ai.api.types import ImpactRow, Scope3AIContext
 from scope3ai.api.typesgen import Task
-from scope3ai.constants import PROVIDERS
 from scope3ai.lib import Scope3AI
 from scope3ai.response_interceptor.aiohttp_interceptor import aiohttp_response_capture
 from scope3ai.response_interceptor.requests_interceptor import requests_response_capture
-PROVIDER = PROVIDERS.HUGGINGFACE_HUB.value
 HUGGING_FACE_TEXT_TO_SPEECH_TASK = "text-to-speech"
@@ -50,7 +48,6 @@ def _hugging_face_text_to_speech_get_impact_row(
         input_tokens=int(input_tokens),
         task=Task.text_to_speech,
         request_duration_ms=float(compute_time) * 1000,
-        managed_service_id=PROVIDER,
     )
     result = TextToSpeechOutput(audio=response, sampling_rate=16000)
     return result, scope3_row
diff --git a/scope3ai/tracers/huggingface/translation.py b/scope3ai/tracers/huggingface/translation.py
index 9be069b..0c0d154 100644
--- a/scope3ai/tracers/huggingface/translation.py
+++ b/scope3ai/tracers/huggingface/translation.py
@@ -13,12 +13,10 @@
 from scope3ai.api.types import ImpactRow, Scope3AIContext
 from scope3ai.api.typesgen import Task
-from scope3ai.constants import PROVIDERS
 from scope3ai.lib import Scope3AI
 from scope3ai.response_interceptor.aiohttp_interceptor import aiohttp_response_capture
 from scope3ai.response_interceptor.requests_interceptor import requests_response_capture
-PROVIDER = PROVIDERS.HUGGINGFACE_HUB.value
 HUGGING_FACE_TRANSLATION_TASK = "translation"
@@ -51,7 +49,6 @@ def _hugging_face_translation_get_impact_row(
         input_tokens=int(input_tokens),
         output_tokens=output_tokens,
         request_duration_ms=float(compute_time) * 1000,
-        managed_service_id=PROVIDER,
     )
     result = TranslationOutput(**asdict(response))
     return result, scope3_row
diff --git a/scope3ai/tracers/huggingface/vision/image_classification.py b/scope3ai/tracers/huggingface/vision/image_classification.py
index a0b477c..1db12fc 100644
--- a/scope3ai/tracers/huggingface/vision/image_classification.py
+++ b/scope3ai/tracers/huggingface/vision/image_classification.py
@@ -16,12 +16,10 @@
 from scope3ai.api.types import ImpactRow, Scope3AIContext
 from scope3ai.api.typesgen import Image as RootImage
 from scope3ai.api.typesgen import Task
-from scope3ai.constants import PROVIDERS
 from scope3ai.lib import Scope3AI
 from scope3ai.response_interceptor.aiohttp_interceptor import aiohttp_response_capture
 from scope3ai.response_interceptor.requests_interceptor import requests_response_capture
-PROVIDER = PROVIDERS.HUGGINGFACE_HUB.value
 HUGGING_FACE_IMAGE_CLASSIFICATION_TASK = "image-classification"
@@ -59,7 +57,6 @@ def _hugging_face_image_classification_get_impact_row(
         task=Task.image_classification,
         output_images=[],  # No images to output in classification
         request_duration_ms=float(compute_time) * 1000,
-        managed_service_id=PROVIDER,
         input_images=input_images,
     )
     result = ImageClassificationOutput(elements=response)
diff --git a/scope3ai/tracers/huggingface/vision/image_segmentation.py b/scope3ai/tracers/huggingface/vision/image_segmentation.py
index 817c554..456414c 100644
--- a/scope3ai/tracers/huggingface/vision/image_segmentation.py
+++ b/scope3ai/tracers/huggingface/vision/image_segmentation.py
@@ -16,12 +16,10 @@
 from scope3ai.api.types import ImpactRow, Scope3AIContext
 from scope3ai.api.typesgen import Image as RootImage
 from scope3ai.api.typesgen import Task
-from scope3ai.constants import PROVIDERS
 from scope3ai.lib import Scope3AI
 from scope3ai.response_interceptor.aiohttp_interceptor import aiohttp_response_capture
 from scope3ai.response_interceptor.requests_interceptor import requests_response_capture
-PROVIDER = PROVIDERS.HUGGINGFACE_HUB.value
 HUGGING_FACE_IMAGE_SEGMENTATION_TASK = "image-segmentation"
@@ -58,7 +56,6 @@ def _hugging_face_image_segmentation_get_impact_row(
         input_tokens=0,
         task=Task.image_segmentation,
         request_duration_ms=float(compute_time) * 1000,
-        managed_service_id=PROVIDER,
         input_images=input_images,
     )
     result = ImageSegmentationOutput()
diff --git a/scope3ai/tracers/huggingface/vision/object_detection.py b/scope3ai/tracers/huggingface/vision/object_detection.py
index 533af97..b826c37 100644
--- a/scope3ai/tracers/huggingface/vision/object_detection.py
+++ b/scope3ai/tracers/huggingface/vision/object_detection.py
@@ -16,12 +16,10 @@
 from scope3ai.api.types import ImpactRow, Scope3AIContext
 from scope3ai.api.typesgen import Image as RootImage
 from scope3ai.api.typesgen import Task
-from scope3ai.constants import PROVIDERS
 from scope3ai.lib import Scope3AI
 from scope3ai.response_interceptor.aiohttp_interceptor import aiohttp_response_capture
 from scope3ai.response_interceptor.requests_interceptor import requests_response_capture
-PROVIDER = PROVIDERS.HUGGINGFACE_HUB.value
 HUGGING_FACE_OBJECT_DETECTION_TASK = "object-detection"
@@ -58,7 +56,6 @@ def _hugging_face_object_detection_get_impact_row(
         input_tokens=0,  # No token usage for object detection
         task=Task.object_detection,
         request_duration_ms=float(compute_time) * 1000,
-        managed_service_id=PROVIDER,
         input_images=input_images,
     )
     result = ObjectDetectionOutput(elements=response)
diff --git a/scope3ai/tracers/litellm/chat.py b/scope3ai/tracers/litellm/chat.py
index 3aece11..8efc4d7 100644
--- a/scope3ai/tracers/litellm/chat.py
+++ b/scope3ai/tracers/litellm/chat.py
@@ -8,13 +8,11 @@
 from scope3ai import Scope3AI
 from scope3ai.api.types import Scope3AIContext, ImpactRow
-from scope3ai.constants import PROVIDERS
 from scope3ai.tracers.utils.multimodal import (
     aggregate_multimodal,
     aggregate_multimodal_audio_content_output,
 )
-PROVIDER = PROVIDERS.LITELLM.value
 logger = logging.getLogger("scope3ai.tracers.litellm.chat")
@@ -56,7 +54,6 @@ def litellm_chat_wrapper_stream(  # type: ignore[misc]
             model_id=model,
             output_tokens=token_count,
             request_duration_ms=float(request_latency) * 1000,
-            managed_service_id=PROVIDER,
         )
         scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
         if scope3ai_ctx is not None:
@@ -90,7 +87,6 @@ def litellm_chat_wrapper_non_stream(
         input_tokens=response.usage.prompt_tokens,
         output_tokens=response.usage.total_tokens,
         request_duration_ms=float(request_latency) * 1000,
-        managed_service_id=PROVIDER,
     )
     if "audio" in modalities:
         audio_format = kwargs.get("audio", {}).get("format", "mp3")
@@ -143,7 +139,6 @@ async def litellm_async_chat_wrapper_base(
         input_tokens=response.usage.prompt_tokens,
         output_tokens=response.usage.total_tokens,
         request_duration_ms=float(request_latency) * 1000,
-        managed_service_id=PROVIDER,
     )
     if "audio" in modalities:
         audio_format = kwargs.get("audio", {}).get("format", "mp3")
@@ -184,7 +179,6 @@ async def litellm_async_chat_wrapper_stream(  # type: ignore[misc]
             model_id=model,
             output_tokens=token_count,
             request_duration_ms=float(request_latency) * 1000,
-            managed_service_id=PROVIDER,
         )
         scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
         if scope3ai_ctx is not None:
diff --git a/scope3ai/tracers/litellm/speech_to_text.py b/scope3ai/tracers/litellm/speech_to_text.py
index 0aa203b..e7c7001 100644
--- a/scope3ai/tracers/litellm/speech_to_text.py
+++ b/scope3ai/tracers/litellm/speech_to_text.py
@@ -9,11 +9,8 @@
 from scope3ai.api.types import ImpactRow
 from scope3ai.api.types import Scope3AIContext
 from scope3ai.api.typesgen import Task
-from scope3ai.constants import PROVIDERS
 from scope3ai.tracers.utils.audio import _get_file_audio_duration
-PROVIDER = PROVIDERS.LITELLM.value
-
 class TranscriptionResponse(_TranscriptionResponse):
     scope3ai: Optional[Scope3AIContext] = None
@@ -39,7 +36,6 @@ def litellm_speech_to_text_get_impact_row(
         model_id=model,
         output_tokens=output_tokens,
         request_duration_ms=float(request_latency) * 1000,
-        managed_service_id=PROVIDER,
         task=Task.speech_to_text,
         **options,
     )
diff --git a/scope3ai/tracers/litellm/text_to_image.py b/scope3ai/tracers/litellm/text_to_image.py
index b3c274b..a92358d 100644
--- a/scope3ai/tracers/litellm/text_to_image.py
+++ b/scope3ai/tracers/litellm/text_to_image.py
@@ -8,9 +8,7 @@
 from scope3ai import Scope3AI
 from scope3ai.api.types import ImpactRow, Scope3AIContext
 from scope3ai.api.typesgen import Image as RootImage, Task
-from scope3ai.constants import PROVIDERS
-PROVIDER = PROVIDERS.LITELLM.value
 DEFAULT_MODEL = "dall-e-2"
 DEFAULT_SIZE = "1024x1024"
 DEFAULT_N = 1
@@ -40,7 +38,6 @@ def litellm_image_generation_get_impact_row(
         model_id=model or DEFAULT_MODEL,
         task=Task.text_to_image,
         request_duration_ms=float(request_latency) * 1000,
-        managed_service_id=PROVIDER,
         output_images=[size] * n,
         input_tokens=input_tokens,
     )
diff --git a/scope3ai/tracers/litellm/text_to_speech.py b/scope3ai/tracers/litellm/text_to_speech.py
index e861292..7324982 100644
--- a/scope3ai/tracers/litellm/text_to_speech.py
+++ b/scope3ai/tracers/litellm/text_to_speech.py
@@ -7,11 +7,8 @@
 from scope3ai import Scope3AI
 from scope3ai.api.types import ImpactRow, Scope3AIContext
-from scope3ai.constants import PROVIDERS
 from scope3ai.tracers.utils.audio import _get_audio_duration
-PROVIDER = PROVIDERS.LITELLM.value
-
 class HttpxBinaryResponseContent(_legacy_response.HttpxBinaryResponseContent):
     scope3ai: Optional[Scope3AIContext] = None
@@ -41,7 +38,6 @@ def litellm_speech_generation_get_impact_row(
     scope3_row = ImpactRow(
         model_id=model,
         request_duration_ms=float(request_latency) * 1000,
-        managed_service_id=PROVIDER,
         **options,
     )
     return scope3_row
diff --git a/scope3ai/tracers/mistralai/chat.py b/scope3ai/tracers/mistralai/chat.py
index ca522f2..aa8935b 100644
--- a/scope3ai/tracers/mistralai/chat.py
+++ b/scope3ai/tracers/mistralai/chat.py
@@ -11,11 +11,9 @@
 from scope3ai import Scope3AI
 from scope3ai.api.types import Scope3AIContext
 from scope3ai.api.typesgen import ImpactRow
-from scope3ai.constants import PROVIDERS
 from scope3ai.tracers.utils.multimodal import aggregate_multimodal
-PROVIDER = PROVIDERS.MISTRALAI.value
-PROVIDER = ""
+
 logger = logging.getLogger("scope3ai.tracers.mistralai.chat")
@@ -41,7 +39,6 @@ def mistralai_v1_chat_wrapper(
         input_tokens=response.usage.prompt_tokens,
         output_tokens=response.usage.completion_tokens,
         request_duration_ms=request_latency * 1000,
-        managed_service_id=PROVIDER,
     )
     scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
     messages = args[1] if len(args) > 1 else kwargs.get("messages")
@@ -71,7 +68,6 @@ def mistralai_v1_chat_wrapper_stream(
             input_tokens=chunk.data.usage.prompt_tokens,
             output_tokens=chunk.data.usage.completion_tokens,
             request_duration_ms=request_latency * 1000,
-            managed_service_id=PROVIDER,
         )
         scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
         chunk.data = CompletionChunk(**chunk.data.model_dump(), scope3ai=scope3ai_ctx)
@@ -92,7 +88,6 @@ async def mistralai_v1_async_chat_wrapper(
         input_tokens=response.usage.prompt_tokens,
         output_tokens=response.usage.completion_tokens,
         request_duration_ms=request_latency * 1000,
-        managed_service_id=PROVIDER,
     )
     scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
     chat = ChatCompletionResponse(**response.model_dump())
@@ -114,7 +109,6 @@ async def _generator(
             input_tokens=chunk.data.usage.prompt_tokens,
             output_tokens=chunk.data.usage.completion_tokens,
             request_duration_ms=request_latency * 1000,
-            managed_service_id=PROVIDER,
         )
         scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
         chunk.data = CompletionChunk(**chunk.data.model_dump(), scope3ai=scope3ai_ctx)
diff --git a/scope3ai/tracers/openai/chat.py b/scope3ai/tracers/openai/chat.py
index 0bd29c6..9543549 100644
--- a/scope3ai/tracers/openai/chat.py
+++ b/scope3ai/tracers/openai/chat.py
@@ -9,14 +9,12 @@
 from openai.types.chat import ChatCompletionChunk as _ChatCompletionChunk
 from scope3ai.api.types import ImpactRow, Scope3AIContext
-from scope3ai.constants import PROVIDERS
 from scope3ai.lib import Scope3AI
 from scope3ai.tracers.utils.multimodal import (
     aggregate_multimodal,
     aggregate_multimodal_audio_content_output,
 )
-PROVIDER = PROVIDERS.OPENAI.value
 logger = logging.getLogger("scope3ai.tracers.openai.chat")
@@ -47,7 +45,6 @@ def _openai_chat_wrapper(
         input_tokens=http_response.get("usage", {}).get("prompt_tokens"),
         output_tokens=http_response.get("usage", {}).get("completion_tokens"),
         request_duration_ms=request_latency * 1000,
-        managed_service_id=PROVIDER,
     )
     if "audio" in modalities:
         audio_format = kwargs.get("audio", {}).get("format", "mp3")
@@ -70,7 +67,6 @@ def _openai_chat_wrapper(
         input_tokens=response.usage.prompt_tokens,
         output_tokens=response.usage.completion_tokens,
         request_duration_ms=request_latency * 1000,
-        managed_service_id=PROVIDER,
     )
     if "audio" in modalities:
         audio_format = kwargs.get("audio", {}).get("format", "mp3")
@@ -138,7 +134,6 @@ def openai_chat_wrapper_stream(
             output_tokens=chunk.usage.completion_tokens,
             request_duration_ms=request_latency * 1000,
             # TODO: can we get the header that has the processing time
-            managed_service_id=PROVIDER,
         )
         scope3_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
@@ -195,7 +190,6 @@ async def openai_async_chat_wrapper_stream(
             output_tokens=chunk.usage.completion_tokens,
             request_duration_ms=request_latency * 1000,
             # TODO: can we get the header that has the processing time
-            managed_service_id=PROVIDER,
         )
         scope3_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
diff --git a/scope3ai/tracers/openai/speech_to_text.py b/scope3ai/tracers/openai/speech_to_text.py
index 0af5fb1..31410e8 100644
--- a/scope3ai/tracers/openai/speech_to_text.py
+++ b/scope3ai/tracers/openai/speech_to_text.py
@@ -1,6 +1,6 @@
 import logging
 import time
-from typing import Any, Callable, Optional, Union
+from typing import Any, Callable, Optional, Tuple, Union
 import tiktoken
 from openai.resources.audio.transcriptions import AsyncTranscriptions, Transcriptions
@@ -10,13 +10,10 @@
 )
 from scope3ai.api.types import ImpactRow, Scope3AIContext, Task
-from scope3ai.constants import PROVIDERS
 from scope3ai.lib import Scope3AI
 from scope3ai.tracers.openai.utils import BaseModelResponse
 from scope3ai.tracers.utils.audio import _get_file_audio_duration
-PROVIDER = PROVIDERS.OPENAI.value
-
 logger = logging.getLogger("scope3.tracers.openai.speech_to_text")
@@ -34,7 +31,7 @@ class TranscriptionVerbose(BaseModelResponse, _TranscriptionVerbose):
 def _openai_speech_to_text_get_impact_row(
     response: Any, request_latency: float, kwargs: dict
-) -> (Union[Transcription, TranscriptionVerbose, str], ImpactRow):
+) -> Tuple[Union[Transcription, TranscriptionVerbose, str], ImpactRow]:
     model = kwargs["model"]
     encoder = tiktoken.get_encoding("cl100k_base")
@@ -52,7 +49,6 @@ def _openai_speech_to_text_get_impact_row(
     scope3_row = ImpactRow(
         model_id=model,
-        managed_service_id=PROVIDER,
         output_tokens=output_tokens,
         request_duration_ms=request_latency,
         task=Task.speech_to_text,
diff --git a/scope3ai/tracers/openai/text_to_image.py b/scope3ai/tracers/openai/text_to_image.py
index ba4de5f..41eb0a0 100644
--- a/scope3ai/tracers/openai/text_to_image.py
+++ b/scope3ai/tracers/openai/text_to_image.py
@@ -1,15 +1,13 @@
 import time
-from typing import Any, Callable, Optional
+from typing import Any, Callable, Optional, Tuple
 from openai.resources.images import AsyncImages, Images
 from openai.types.images_response import ImagesResponse as _ImageResponse
-
 from scope3ai.api.types import ImpactRow, Scope3AIContext, Task
 from scope3ai.api.typesgen import Image as RootImage
 from scope3ai.lib import Scope3AI
 from scope3ai.tracers.openai.utils import BaseModelResponse
-PROVIDER = "openai"
 DEFAULT_MODEL = "dall-e-2"
 DEFAULT_SIZE = "1024x1024"
 DEFAULT_N = 1
@@ -21,7 +19,7 @@ class ImageResponse(BaseModelResponse, _ImageResponse):
 def _openai_image_get_impact_row(
     response: _ImageResponse, request_latency: float, **kwargs: Any
-) -> (ImageResponse, ImpactRow):
+) -> Tuple[ImageResponse, ImpactRow]:
     model = kwargs.get("model", DEFAULT_MODEL)
     size = RootImage(root=kwargs.get("size", DEFAULT_SIZE))
     n = kwargs.get("n", DEFAULT_N)
@@ -31,7 +29,6 @@ def _openai_image_get_impact_row(
         task=Task.text_to_image,
         output_images=[size] * n,
         request_duration_ms=request_latency * 1000,
-        managed_service_id=PROVIDER,
     )
     result = ImageResponse.model_construct(**response.model_dump())
diff --git a/scope3ai/tracers/openai/text_to_speech.py b/scope3ai/tracers/openai/text_to_speech.py
index ff10bee..a8f4187 100644
--- a/scope3ai/tracers/openai/text_to_speech.py
+++ b/scope3ai/tracers/openai/text_to_speech.py
@@ -1,18 +1,15 @@
 import logging
 import time
-from typing import Any, Callable, Optional
+from typing import Any, Callable, Optional, Tuple
 import tiktoken
 from openai.resources.audio.speech import AsyncSpeech, Speech, _legacy_response
 from scope3ai.api.types import ImpactRow, Scope3AIContext, Task
-from scope3ai.constants import PROVIDERS
 from scope3ai.lib import Scope3AI
 from scope3ai.tracers.openai.utils import BaseModelResponse
 from scope3ai.tracers.utils.audio import _get_audio_duration
-PROVIDER = PROVIDERS.OPENAI.value
-
 logger = logging.getLogger(f"scope3ai.tracers.{__name__}")
@@ -26,7 +23,7 @@ def _openai_text_to_speech_get_impact_row(
     response: _legacy_response.HttpxBinaryResponseContent,
     request_latency: float,
     kwargs: Any,
-) -> (HttpxBinaryResponseContent, ImpactRow):
+) -> Tuple[HttpxBinaryResponseContent, ImpactRow]:
     # try getting duration
     response_format = kwargs.get("response_format", "mp3")
     duration = _get_audio_duration(response_format, response.content)
@@ -46,7 +43,6 @@ def _openai_text_to_speech_get_impact_row(
         model_id=model_requested,
         input_tokens=input_tokens,
         request_duration_ms=request_latency,
-        managed_service_id=PROVIDER,
         output_audio_seconds=duration,
         task=Task.text_to_speech,
     )
diff --git a/scope3ai/tracers/openai/translation.py b/scope3ai/tracers/openai/translation.py
index c56449d..6084c26 100644
--- a/scope3ai/tracers/openai/translation.py
+++ b/scope3ai/tracers/openai/translation.py
@@ -10,13 +10,10 @@
 )
 from scope3ai.api.types import ImpactRow, Scope3AIContext, Task
-from scope3ai.constants import PROVIDERS
 from scope3ai.lib import Scope3AI
 from scope3ai.tracers.openai.utils import BaseModelResponse
 from scope3ai.tracers.utils.audio import _get_file_audio_duration
-PROVIDER = PROVIDERS.OPENAI.value
-
 logger = logging.getLogger(__name__)
@@ -52,7 +49,6 @@ def _openai_translation_get_impact_row(
     scope3_row = ImpactRow(
         model_id=model,
-        managed_service_id=PROVIDER,
         output_tokens=output_tokens,
         request_duration_ms=request_latency,
         task=Task.translation,
diff --git a/tests/test_litellm_multimodal.py b/tests/test_litellm_multimodal.py
index 2a455cb..b14667b 100644
--- a/tests/test_litellm_multimodal.py
+++ b/tests/test_litellm_multimodal.py
@@ -2,7 +2,6 @@
 import litellm
 from scope3ai.api.typesgen import Image
-from scope3ai.constants import PROVIDERS
 from tests.utils import (
     load_image_b64,
     TEST_IMAGE_PNG,
@@ -41,7 +40,8 @@ def test_litellm_multimodal_vision_openai(tracer_with_sync_init):
     )
     assert len(response.choices) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
     assert response.scope3ai.request.input_tokens == 872
     assert response.scope3ai.request.output_tokens == 931
     assert response.scope3ai.request.input_images == [Image(root="1024x1024")]
@@ -87,7 +87,8 @@ def test_litellm_multimodal_vision_2_images_openai(tracer_with_sync_init):
     )
     assert len(response.choices) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
     assert response.scope3ai.request.input_tokens == 1082
     assert response.scope3ai.request.output_tokens == 1136
     assert response.scope3ai.request.input_images == [
@@ -129,7 +130,8 @@ def test_litellm_multimodal_audio_openai(tracer_with_sync_init):
     )
     assert len(response.choices) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
     assert response.scope3ai.request.input_tokens == 29
     assert response.scope3ai.request.output_tokens == 39
     assert response.scope3ai.request.input_audio_seconds >= 1
@@ -175,7 +177,8 @@ def test_litellm_multimodal_audio_2_openai(tracer_with_sync_init):
     )
     assert len(response.choices) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
     assert response.scope3ai.request.input_tokens == 46
     assert response.scope3ai.request.output_tokens == 81
     assert response.scope3ai.request.input_audio_seconds >= 1
@@ -214,7 +217,8 @@ def test_litellm_multimodal_vision_mistralai(tracer_with_sync_init):
     )
     assert len(response.choices) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
     assert response.scope3ai.request.input_tokens == 4172
     assert response.scope3ai.request.output_tokens == 4253
     assert response.scope3ai.request.input_images == [Image(root="1024x1024")]
@@ -260,7 +264,8 @@ def test_litellm_multimodal_vision_2_images_mistralai(tracer_with_sync_init):
     )
     assert len(response.choices) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
     assert response.scope3ai.request.input_tokens == 5228
     assert response.scope3ai.request.output_tokens == 5337
     assert response.scope3ai.request.input_images == [
diff --git a/tests/test_litellm_multimodal_output.py b/tests/test_litellm_multimodal_output.py
index 7ad735f..16ae2fc 100644
--- a/tests/test_litellm_multimodal_output.py
+++ b/tests/test_litellm_multimodal_output.py
@@ -1,6 +1,5 @@
 import litellm
 import pytest
-from scope3ai.constants import PROVIDERS
 @pytest.mark.vcr
@@ -16,7 +15,9 @@ def test_litellm_multimodal_output_openai(tracer_with_sync_init, audio_format):
     )
     assert len(response.choices) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+
     assert response.scope3ai.request.input_tokens == 17
     assert response.scope3ai.request.output_tokens > 0
     assert response.scope3ai.request.output_audio_seconds > 0
@@ -43,7 +44,8 @@ def test_litellm_multimodal_output_default(tracer_with_sync_init, audio_format):
     )
     assert len(response.choices) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
     assert response.scope3ai.request.input_tokens == 17
     assert response.scope3ai.request.output_tokens > 0
     assert response.scope3ai.request.output_audio_seconds > 0
@@ -72,7 +74,9 @@ async def test_litellm_multimodal_output_openai_async(
     )
     assert len(response.choices) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+
     assert response.scope3ai.request.input_tokens == 17
     assert response.scope3ai.request.output_tokens > 0
     assert response.scope3ai.request.output_audio_seconds > 0
@@ -102,7 +106,8 @@ async def test_litellm_multimodal_output_default_async(
     )
     assert len(response.choices) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
     assert response.scope3ai.request.input_tokens == 17
     assert response.scope3ai.request.output_tokens > 0
     assert response.scope3ai.request.output_audio_seconds > 0
diff --git a/tests/test_litellm_multimodal_use_default.py b/tests/test_litellm_multimodal_use_default.py
index b1d9ceb..1253242 100644
--- a/tests/test_litellm_multimodal_use_default.py
+++ b/tests/test_litellm_multimodal_use_default.py
@@ -2,7 +2,6 @@
 import litellm
 from scope3ai.api.typesgen import Image
-from scope3ai.constants import PROVIDERS
 from tests.utils import (
     load_image_b64,
     TEST_IMAGE_PNG,
@@ -41,7+40,9 @@ def test_litellm_multimodal_vision_openai(tracer_with_sync_init):
     )
     assert len(response.choices) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+
     assert response.scope3ai.request.input_tokens == 872
     assert response.scope3ai.request.output_tokens == 59
     assert response.scope3ai.request.input_images == [Image(root="1024x1024")]
@@ -87,7+88,9 @@ def test_litellm_multimodal_vision_2_images_openai(tracer_with_sync_init):
     )
     assert len(response.choices) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+
     assert response.scope3ai.request.input_tokens == 1082
     assert response.scope3ai.request.output_tokens == 54
     assert response.scope3ai.request.input_images == [
@@ -129,7 +132,9 @@ def test_litellm_multimodal_audio_openai(tracer_with_sync_init):
     )
     assert len(response.choices) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+
     assert response.scope3ai.request.input_tokens == 29
     assert response.scope3ai.request.output_tokens == 10
     assert response.scope3ai.request.input_audio_seconds >= 1
@@ -175,7 +180,9 @@ def test_litellm_multimodal_audio_2_openai(tracer_with_sync_init):
     )
     assert len(response.choices) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+
     assert response.scope3ai.request.input_tokens == 46
     assert response.scope3ai.request.output_tokens == 35
     assert response.scope3ai.request.input_audio_seconds >= 1
@@ -214,7 +221,9 @@ def test_litellm_multimodal_vision_mistralai(tracer_with_sync_init):
     )
     assert len(response.choices) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+
     assert response.scope3ai.request.input_tokens == 4172
     assert response.scope3ai.request.output_tokens == 81
     assert response.scope3ai.request.input_images == [Image(root="1024x1024")]
@@ -260,7 +269,9 @@ def test_litellm_multimodal_vision_2_images_mistralai(tracer_with_sync_init):
     )
     assert len(response.choices) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+
     assert response.scope3ai.request.input_tokens == 5228
     assert response.scope3ai.request.output_tokens == 109
     assert response.scope3ai.request.input_images == [
diff --git a/tests/test_litellm_tracer.py b/tests/test_litellm_tracer.py
index 91eb75f..2090fa2 100644
--- a/tests/test_litellm_tracer.py
+++ b/tests/test_litellm_tracer.py
@@ -3,7 +3,6 @@
 import litellm
 import pytest
-from scope3ai.constants import PROVIDERS
 USE_ALWAYS_LITELLM_TRACER = True
@@ -98,7 +97,8 @@ def test_litellm_image_generation(tracer_with_sync_init):
     assert response
     assert len(response.data) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
     assert response.scope3ai.request is not None
    assert response.scope3ai.request.input_tokens == 8
     assert response.scope3ai.request.request_duration_ms > 0
@@ -122,7 +122,8 @@ async def test_litellm_async_image_generation(tracer_with_sync_init):
     assert response
     assert len(response.data) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
     assert response.scope3ai.request is not None
     assert response.scope3ai.request.input_tokens == 6
     assert response.scope3ai.request.request_duration_ms > 0
@@ -150,7 +151,8 @@ def test_litellm_speech_to_text(tracer_with_sync_init):
     assert response.text is not None
     assert len(response.text) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
     assert response.scope3ai.request is not None
     assert response.scope3ai.request.output_tokens == 2
     assert response.scope3ai.request.request_duration_ms > 0
@@ -177,7 +179,8 @@ async def test_litellm_async_speech_to_text(tracer_with_sync_init):
     assert response.text is not None
     assert len(response.text) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
     assert response.scope3ai.request is not None
     assert response.scope3ai.request.output_tokens == 2
     assert response.scope3ai.request.request_duration_ms > 0
@@ -202,7 +205,8 @@ def test_litellm_text_to_speech(tracer_with_sync_init):
     assert response.text is not None
     assert len(response.text) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
     assert response.scope3ai.request is not None
     assert response.scope3ai.request.input_tokens == 12
     assert response.scope3ai.request.request_duration_ms > 0
@@ -229,7 +233,8 @@ async def test_litellm_async_text_to_speech(tracer_with_sync_init):
     assert response.text is not None
     assert len(response.text) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
     assert response.scope3ai.request is not None
     assert response.scope3ai.request.input_tokens == 12
     assert response.scope3ai.request.request_duration_ms > 0
diff --git a/tests/test_litellm_tracer_use_default.py b/tests/test_litellm_tracer_use_default.py
index 16c8315..a480102 100644
--- a/tests/test_litellm_tracer_use_default.py
+++ b/tests/test_litellm_tracer_use_default.py
@@ -3,7 +3,6 @@
 import litellm
 import pytest
-from scope3ai.constants import PROVIDERS
 USE_ALWAYS_LITELLM_TRACER = False
@@ -18,7 +17,8 @@ def test_litellm_chat(tracer_with_sync_init):
     )
     assert len(response.choices) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
     assert response.scope3ai.request.input_tokens == 44
     assert response.scope3ai.request.output_tokens == 69
     assert response.scope3ai.impact is not None
@@ -40,7 +40,8 @@ async def test_litellm_async_chat(tracer_with_sync_init):
     )
     assert len(response.choices) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value
     assert response.scope3ai.request.input_tokens == 3
     assert response.scope3ai.impact is not None
     assert response.scope3ai.impact.total_impact is not None
@@ -100,7 +101,9 @@ def test_litellm_image_generation(tracer_with_sync_init):
     assert response
     assert len(response.data) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+
     assert response.scope3ai.request is not None
     assert response.scope3ai.request.request_duration_ms > 0
     assert response.scope3ai.impact is not None
@@ -123,7 +126,9 @@ async def test_litellm_async_image_generation(tracer_with_sync_init):
     assert response
     assert len(response.data) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+
     assert response.scope3ai.request is not None
     assert response.scope3ai.request.request_duration_ms > 0
     assert response.scope3ai.impact is not None
@@ -150,7 +155,9 @@ def test_litellm_speech_to_text(tracer_with_sync_init):
     assert response.text is not None
     assert len(response.text) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+
     assert response.scope3ai.request is not None
     assert response.scope3ai.request.output_tokens == 2
     assert response.scope3ai.request.request_duration_ms > 0
@@ -177,7 +184,9 @@ async def test_litellm_async_speech_to_text(tracer_with_sync_init):
     assert response.text is not None
     assert len(response.text) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+
     assert response.scope3ai.request is not None
     assert response.scope3ai.request.output_tokens == 2
     assert response.scope3ai.request.request_duration_ms > 0
@@ -202,7 +211,9 @@ def test_litellm_text_to_speech(tracer_with_sync_init):
     assert response.text is not None
     assert len(response.text) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+
     assert response.scope3ai.request is not None
     assert response.scope3ai.request.input_tokens == 12
     assert response.scope3ai.request.request_duration_ms > 0
@@ -229,7 +240,9 @@ async def test_litellm_async_text_to_speech(tracer_with_sync_init):
     assert response.text is not None
     assert len(response.text) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+
     assert response.scope3ai.request is not None
     assert response.scope3ai.request.input_tokens == 12
     assert response.scope3ai.request.request_duration_ms > 0
diff --git a/tests/test_openai_multimodal_output.py b/tests/test_openai_multimodal_output.py
index 0bec5f0..de593b1 100644
--- a/tests/test_openai_multimodal_output.py
+++ b/tests/test_openai_multimodal_output.py
@@ -1,5 +1,4 @@
 import pytest
-from scope3ai.constants import PROVIDERS
 @pytest.mark.vcr
@@ -18,7 +17,9 @@ def test_openai_multimodal_output(tracer_with_sync_init, audio_format):
     )
     assert len(response.choices) > 0
     assert getattr(response, "scope3ai") is not None
-    assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+    # TODO: Add this assert when AiApi supports it
+    # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value
+
    assert response.scope3ai.request.input_tokens == 17
     assert response.scope3ai.request.output_tokens > 0
     assert response.scope3ai.request.output_audio_seconds > 0