From d83659b8295284045f34db48632545fc68bcbb1c Mon Sep 17 00:00:00 2001 From: Kevin Guevara Date: Wed, 5 Feb 2025 11:53:33 -0500 Subject: [PATCH 01/14] fix(tracer): tracer impact duplication fix --- examples/mistralai/mistral-complete.py | 6 +- scope3ai/lib.py | 2 - scope3ai/tracers/mistralai/chat.py | 2 +- tests/cassettes/test_tracer_rows.yaml | 88 ++++++++++++++++++++++++++ tests/test_tracer.py | 28 ++++++++ 5 files changed, 121 insertions(+), 5 deletions(-) create mode 100644 tests/cassettes/test_tracer_rows.yaml diff --git a/examples/mistralai/mistral-complete.py b/examples/mistralai/mistral-complete.py index 20f1935..eb35014 100644 --- a/examples/mistralai/mistral-complete.py +++ b/examples/mistralai/mistral-complete.py @@ -1,5 +1,6 @@ from scope3ai import Scope3AI from mistralai import Mistral +import os def main( @@ -9,8 +10,9 @@ def main( temperature: float, api_key: str | None = None, ): + api_key = api_key or os.environ["MISTRAL_API_KEY"] scope3 = Scope3AI.init() - client = Mistral(api_key=api_key) if api_key else Mistral() + client = Mistral(api_key=api_key) with scope3.trace() as tracer: response = client.chat.complete( @@ -19,8 +21,8 @@ def main( max_tokens=max_tokens, temperature=temperature, ) - print(response.choices[0].message.content) + print(response.choices[0].message.content) impact = tracer.impact() print(impact) print(f"Total Energy Wh: {impact.total_energy_wh}") diff --git a/scope3ai/lib.py b/scope3ai/lib.py index a7a2143..35dc4a0 100644 --- a/scope3ai/lib.py +++ b/scope3ai/lib.py @@ -237,7 +237,6 @@ def submit_impact( ) ctx.set_impact(response.rows[0]) if ctx._tracer: - ctx._tracer.add_impact(response.rows[0]) ctx._tracer._unlink_trace(ctx) return response @@ -285,7 +284,6 @@ async def asubmit_impact( ) ctx.set_impact(response.rows[0]) if tracer: - tracer.add_impact(response.rows[0]) tracer._unlink_trace(ctx) return ctx diff --git a/scope3ai/tracers/mistralai/chat.py b/scope3ai/tracers/mistralai/chat.py index 3471d7d..3f0c59e 100644 --- a/scope3ai/tracers/mistralai/chat.py +++ b/scope3ai/tracers/mistralai/chat.py @@ -15,7 +15,7 @@ from scope3ai.tracers.utils.multimodal import aggregate_multimodal PROVIDER = PROVIDERS.MISTRALAI.value - +PROVIDER = "" logger = logging.getLogger("scope3ai.tracers.mistralai.chat") diff --git a/tests/cassettes/test_tracer_rows.yaml b/tests/cassettes/test_tracer_rows.yaml new file mode 100644 index 0000000..c0c7743 --- /dev/null +++ b/tests/cassettes/test_tracer_rows.yaml @@ -0,0 +1,88 @@ +interactions: +- request: + body: '{"max_tokens":100,"messages":[{"content":"Give me small summary of 100 + years of Loneliness","role":"user"}],"model":"mistral-large-latest","safe_prompt":false,"stream":false,"temperature":0.7,"top_p":1.0}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - DUMMY + connection: + - keep-alive + content-length: + - '204' + content-type: + - application/json + host: + - api.mistral.ai + user-agent: + - mistral-client-python/1.2.5 + method: POST + uri: https://api.mistral.ai/v1/chat/completions + response: + body: + string: !!binary | + H4sIAAAAAAAAAzyQQW4UQQxFr2J5k01PRCszDPSSTWARsYANIhLyVLmnDS5XqHLPpBlxGA7AKXIx + VE1g++u73v//ghJxwBDGPvZ84JG223G7O4y02/Wv+92u5/4l7bHDfPjKwZt3Ir8OOT0ou2TDDkNh + co449PubV/v9tr/ZdphyZMUBk1QvpBulcuSNknP1djNlCVxx+HxBsciPOLzoMHGtdGQcLliyMg5I + tUp1snbjOeuXQKoVB5tVOwzZnK2lusf3xvB2tlg4wiemUiGP8CGr+Bz5HkEqEChZTFS+geUTKxwW + uKVDEVa4pRKefhPcPf0q32f+AT6Rg7NqBZ8Y0qwumyMbF2q9SaF6LkujtPc3M1tsH4yURBcgi6s+ + 
5tmi2PHZJwXS4pMEUvB8tibfUcgW8zV8nPhfMGWLFRIdV2dhUqkJzuITTNLAq84nNq8deKHQGCtw + 5V9VKFJ5jTGSKuQTFyAIbD6X5RreOfDjg+bCa7/E6171ea8ONJ+4gzOV7n+VsARdsUY+F27+v1mW + DsKsSYy8pRBb7a3eVYW2WyJn/NnhKCb1jzLii1ITi/PzlKyUclLz0ksylGpjdZRKYRFfUJSfW1AS + X5KfnZpXDEpToIgvScxBiICEEAkQIW5gUFsLAAAA//8DAFhUqoXRAgAA + headers: + CF-RAY: + - 90d46402de42c82c-TPA + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Wed, 05 Feb 2025 16:48:56 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=SYeIhz9qWk9MqmjWcxP_AUVUeqFI1w78vzx.196xHSs-1738774136-1.0.1.1-.QOPRqy_H_bxV5KHpUaEoEgXNC5sBtYXsLxjlSrmXzZ31EkHHlN84_Je6ndF60fYPE9vTCyEriEvj2jRflvGWQ; + path=/; expires=Wed, 05-Feb-25 17:18:56 GMT; domain=.mistral.ai; HttpOnly; + Secure; SameSite=None + Transfer-Encoding: + - chunked + access-control-allow-origin: + - '*' + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + ratelimitbysize-limit: + - '500000' + ratelimitbysize-query-cost: + - '113' + ratelimitbysize-remaining: + - '499887' + ratelimitbysize-reset: + - '6' + x-envoy-upstream-service-time: + - '1749' + x-kong-proxy-latency: + - '4' + x-kong-request-id: + - 16ca5326376f860af227e819ee9ef858 + x-kong-upstream-latency: + - '1750' + x-ratelimitbysize-limit-minute: + - '500000' + x-ratelimitbysize-limit-month: + - '1000000000' + x-ratelimitbysize-remaining-minute: + - '499887' + x-ratelimitbysize-remaining-month: + - '999788340' + status: + code: 200 + message: OK +version: 1 diff --git a/tests/test_tracer.py b/tests/test_tracer.py index 8be3a3e..94ac28a 100644 --- a/tests/test_tracer.py +++ b/tests/test_tracer.py @@ -188,6 +188,34 @@ def test_tracer_context_nested(tracer_init): assert impact2.total_mlh2o > impact.total_mlh2o +@pytest.mark.vcr +def test_tracer_rows(tracer_init): + from mistralai import Mistral + import os + + client = Mistral(api_key=os.environ["MISTRAL_API_KEY"]) + + with tracer_init.trace() as tracer: + client.chat.complete( + model="mistral-large-latest", + messages=[ + { + "role": "user", + "content": "Give me small summary of 100 years of Loneliness", + } + ], + max_tokens=100, + temperature=0.7, + ) + + impact = tracer.impact() + assert impact is not None + assert impact.total_energy_wh > 0 + assert impact.total_gco2e > 0 + assert impact.total_mlh2o > 0 + assert len(tracer.get_all_rows()) == 1 + + def test_tracer_submit_impact(tracer_init): from scope3ai.api.types import ImpactRow From f8def40046a4a3da71c30a7f0f8b14e72932eda9 Mon Sep 17 00:00:00 2001 From: Kevin Guevara Date: Wed, 5 Feb 2025 13:03:38 -0500 Subject: [PATCH 02/14] fix(tracer): tracer impact duplication fix --- tests/test_tracer.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/test_tracer.py b/tests/test_tracer.py index 94ac28a..29421eb 100644 --- a/tests/test_tracer.py +++ b/tests/test_tracer.py @@ -191,9 +191,8 @@ def test_tracer_context_nested(tracer_init): @pytest.mark.vcr def test_tracer_rows(tracer_init): from mistralai import Mistral - import os - client = Mistral(api_key=os.environ["MISTRAL_API_KEY"]) + client = Mistral() with tracer_init.trace() as tracer: client.chat.complete( From d504fe3a3861ec1962cff98fbc861e6e3149de2a Mon Sep 17 00:00:00 2001 From: Kevin Guevara Date: Wed, 5 Feb 2025 14:03:11 -0500 Subject: [PATCH 03/14] fix(mistral): fixing examples for mistral --- examples/mistralai/mistral-complete-async.py | 2 + examples/mistralai/mistral-stream-async.py | 12 ++-- examples/mistralai/mistral-stream.py | 11 +++- scope3ai/tracers/mistralai/chat.py | 66 
+++++++++++--------- tests/test_mistralai.py | 62 ++++++++++++------ 5 files changed, 98 insertions(+), 55 deletions(-) diff --git a/examples/mistralai/mistral-complete-async.py b/examples/mistralai/mistral-complete-async.py index baf9738..cdc0e96 100644 --- a/examples/mistralai/mistral-complete-async.py +++ b/examples/mistralai/mistral-complete-async.py @@ -1,4 +1,5 @@ import asyncio +import os from scope3ai import Scope3AI from mistralai import Mistral @@ -10,6 +11,7 @@ async def main( temperature: float, api_key: str | None = None, ): + api_key = api_key or os.environ["MISTRAL_API_KEY"] scope3 = Scope3AI.init() client = Mistral(api_key=api_key) if api_key else Mistral() diff --git a/examples/mistralai/mistral-stream-async.py b/examples/mistralai/mistral-stream-async.py index 5d50372..55dd235 100644 --- a/examples/mistralai/mistral-stream-async.py +++ b/examples/mistralai/mistral-stream-async.py @@ -1,5 +1,5 @@ import asyncio - +import os from mistralai import Mistral from scope3ai import Scope3AI @@ -12,19 +12,23 @@ async def main( temperature: float, api_key: str | None = None, ): + api_key = api_key or os.environ["MISTRAL_API_KEY"] scope3 = Scope3AI.init() client = Mistral(api_key=api_key) if api_key else Mistral() with scope3.trace() as tracer: + chunk_count = 0 + content = "" async for chunk in await client.chat.stream_async( model=model, messages=[{"role": "user", "content": message}], max_tokens=max_tokens, temperature=temperature, ): - print(chunk.choices[0].delta.content or "", end="", flush=True) - print() - + chunk_count += 1 + content += chunk.data.choices[0].delta.content + print(f"Chunk count: {chunk_count}") + print(f"Content: {content}") impact = tracer.impact() print(impact) print(f"Total Energy Wh: {impact.total_energy_wh}") diff --git a/examples/mistralai/mistral-stream.py b/examples/mistralai/mistral-stream.py index a0d7c1c..509138e 100644 --- a/examples/mistralai/mistral-stream.py +++ b/examples/mistralai/mistral-stream.py @@ -1,5 +1,6 @@ from scope3ai import Scope3AI from mistralai import Mistral +import os def main( @@ -9,10 +10,13 @@ def main( temperature: float, api_key: str | None = None, ): + api_key = api_key or os.environ["MISTRAL_API_KEY"] scope3 = Scope3AI.init() client = Mistral(api_key=api_key) if api_key else Mistral() with scope3.trace() as tracer: + chunk_count = 0 + content = "" stream = client.chat.stream( model=model, messages=[{"role": "user", "content": message}], @@ -20,9 +24,10 @@ def main( temperature=temperature, ) for chunk in stream: - print(chunk.choices[0].delta.content or "", end="", flush=True) - print() - + chunk_count += 1 + content += chunk.data.choices[0].delta.content + print(f"Chunk count: {chunk_count}") + print(f"Content: {content}") impact = tracer.impact() print(impact) print(f"Total Energy Wh: {impact.total_energy_wh}") diff --git a/scope3ai/tracers/mistralai/chat.py b/scope3ai/tracers/mistralai/chat.py index 3f0c59e..1ff8de0 100644 --- a/scope3ai/tracers/mistralai/chat.py +++ b/scope3ai/tracers/mistralai/chat.py @@ -60,26 +60,27 @@ def mistralai_v1_chat_wrapper_stream( ) -> Iterable[CompletionEvent]: timer_start = time.perf_counter() stream = wrapped(*args, **kwargs) - token_count = 0 for i, chunk in enumerate(stream): - if i > 0 and chunk.data.choices[0].finish_reason is None: - token_count += 1 model_name = chunk.data.model if chunk.data: - request_latency = time.perf_counter() - timer_start - scope3_row = ImpactRow( - model_id=model_name, - input_tokens=token_count, - output_tokens=chunk.data.usage.completion_tokens - if 
chunk.data.usage - else 0, - request_duration_ms=request_latency * 1000, - managed_service_id=PROVIDER, - ) - scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row) - chunk.data = CompletionChunk( - **chunk.data.model_dump(), scope3ai=scope3ai_ctx - ) + input_tokens = 0 + output_tokens = 0 + if chunk.data.usage is not None: + input_tokens = chunk.data.usage.prompt_tokens + output_tokens = chunk.data.usage.completion_tokens + if input_tokens != 0 or output_tokens != 0: + request_latency = time.perf_counter() - timer_start + scope3_row = ImpactRow( + model_id=model_name, + input_tokens=input_tokens, + output_tokens=output_tokens, + request_duration_ms=request_latency * 1000, + managed_service_id=PROVIDER, + ) + scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row) + chunk.data = CompletionChunk( + **chunk.data.model_dump(), scope3ai=scope3ai_ctx + ) yield chunk @@ -108,21 +109,26 @@ async def mistralai_v1_async_chat_wrapper( async def _generator( stream: AsyncGenerator[CompletionEvent, None], timer_start: float ) -> AsyncGenerator[CompletionEvent, None]: - token_count = 0 async for chunk in stream: + input_tokens = 0 + output_tokens = 0 if chunk.data.usage is not None: - token_count = chunk.data.usage.completion_tokens - request_latency = time.perf_counter() - timer_start - model_name = chunk.data.model - scope3_row = ImpactRow( - model_id=model_name, - input_tokens=token_count, - output_tokens=chunk.data.usage.completion_tokens if chunk.data.usage else 0, - request_duration_ms=request_latency * 1000, - managed_service_id=PROVIDER, - ) - scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row) - chunk.data = CompletionChunk(**chunk.data.model_dump(), scope3ai=scope3ai_ctx) + input_tokens = chunk.data.usage.prompt_tokens + output_tokens = chunk.data.usage.completion_tokens + if input_tokens != 0 or output_tokens != 0: + request_latency = time.perf_counter() - timer_start + model_name = chunk.data.model + scope3_row = ImpactRow( + model_id=model_name, + input_tokens=input_tokens, + output_tokens=output_tokens, + request_duration_ms=request_latency * 1000, + managed_service_id=PROVIDER, + ) + scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row) + chunk.data = CompletionChunk( + **chunk.data.model_dump(), scope3ai=scope3ai_ctx + ) yield chunk diff --git a/tests/test_mistralai.py b/tests/test_mistralai.py index 3a45c5f..6e97be7 100644 --- a/tests/test_mistralai.py +++ b/tests/test_mistralai.py @@ -47,15 +47,28 @@ def test_mistralai_stream_chat(tracer_with_sync_init): stream = client.chat.stream( messages=[{"role": "user", "content": "Hello World!"}], model="mistral-tiny" ) - for chunk in stream: - assert getattr(chunk.data, "scope3ai") is not None - assert chunk.data.scope3ai.impact is not None - assert chunk.data.scope3ai.impact.total_impact is not None - assert chunk.data.scope3ai.impact.total_impact.usage_energy_wh > 0 - assert chunk.data.scope3ai.impact.total_impact.usage_emissions_gco2e > 0 - assert chunk.data.scope3ai.impact.total_impact.usage_water_ml > 0 - assert chunk.data.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0 - assert chunk.data.scope3ai.impact.total_impact.embodied_water_ml > 0 + with tracer_with_sync_init.trace() as tracer: + for chunk in stream: + if chunk.data.usage: + if ( + chunk.data.usage.prompt_tokens > 0 + or chunk.data.usage.completion_tokens > 0 + ): + assert getattr(chunk.data, "scope3ai") is not None + assert chunk.data.scope3ai.impact is not None + assert chunk.data.scope3ai.impact.total_impact is 
not None + assert chunk.data.scope3ai.impact.total_impact.usage_energy_wh > 0 + assert ( + chunk.data.scope3ai.impact.total_impact.usage_emissions_gco2e + > 0 + ) + assert chunk.data.scope3ai.impact.total_impact.usage_water_ml > 0 + assert ( + chunk.data.scope3ai.impact.total_impact.embodied_emissions_gco2e + > 0 + ) + assert chunk.data.scope3ai.impact.total_impact.embodied_water_ml > 0 + assert len(tracer.get_all_rows()) > 0 @pytest.mark.vcr @@ -65,12 +78,25 @@ async def test_mistralai_async_stream_chat(tracer_with_sync_init): stream = await client.chat.stream_async( messages=[{"role": "user", "content": "Hello World!"}], model="mistral-tiny" ) - async for chunk in stream: - assert getattr(chunk.data, "scope3ai") is not None - assert chunk.data.scope3ai.impact is not None - assert chunk.data.scope3ai.impact.total_impact is not None - assert chunk.data.scope3ai.impact.total_impact.usage_energy_wh > 0 - assert chunk.data.scope3ai.impact.total_impact.usage_emissions_gco2e > 0 - assert chunk.data.scope3ai.impact.total_impact.usage_water_ml > 0 - assert chunk.data.scope3ai.impact.total_impact.embodied_emissions_gco2e > 0 - assert chunk.data.scope3ai.impact.total_impact.embodied_water_ml > 0 + with tracer_with_sync_init.trace() as tracer: + async for chunk in stream: + if chunk.data.usage: + if ( + chunk.data.usage.prompt_tokens > 0 + or chunk.data.usage.completion_tokens > 0 + ): + assert getattr(chunk.data, "scope3ai") is not None + assert chunk.data.scope3ai.impact is not None + assert chunk.data.scope3ai.impact.total_impact is not None + assert chunk.data.scope3ai.impact.total_impact.usage_energy_wh > 0 + assert ( + chunk.data.scope3ai.impact.total_impact.usage_emissions_gco2e + > 0 + ) + assert chunk.data.scope3ai.impact.total_impact.usage_water_ml > 0 + assert ( + chunk.data.scope3ai.impact.total_impact.embodied_emissions_gco2e + > 0 + ) + assert chunk.data.scope3ai.impact.total_impact.embodied_water_ml > 0 + assert len(tracer.get_all_rows()) > 0 From f9e162e6e713d18b02bbbdb4e08e6769335ce099 Mon Sep 17 00:00:00 2001 From: Kevin Guevara Date: Wed, 5 Feb 2025 14:05:10 -0500 Subject: [PATCH 04/14] fix(mistral): fixing readme for mistral --- examples/mistralai/README.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/examples/mistralai/README.md b/examples/mistralai/README.md index e2c666b..ea50e01 100644 --- a/examples/mistralai/README.md +++ b/examples/mistralai/README.md @@ -12,14 +12,16 @@ This directory contains examples of using Mistral AI's API with environmental im ## Usage Examples +From the root directory, run the following commands: + ```bash -python mistral-complete.py --model "mistral-large-latest" --message "What is artificial intelligence?" --max-tokens 100 +uv run python -m examples.mistralai.mistral-complete --model "mistral-large-latest" --message "What is artificial intelligence?" 
--max-tokens 100 # With custom temperature -python mistral-complete.py --message "Write a story" --temperature 0.9 +uv run python -m examples.mistralai.mistral-complete --message "Write a story" --temperature 0.9 -python mistral-stream.py --message "Explain quantum mechanics" --max-tokens 200 +uv run python -m examples.mistralai.mistral-stream --message "Explain quantum mechanics" --max-tokens 200 # Async streaming -python mistral-stream-async.py--model "mistral-medium" --message "Tell me a story" +uv run python -m examples.mistralai.mistral-stream-async --model "mistral-medium" --message "Tell me a story" ``` \ No newline at end of file From 8eaa1a8d7a4a9168349cb23ebdec762f02b00085 Mon Sep 17 00:00:00 2001 From: Kevin Guevara Date: Wed, 5 Feb 2025 14:40:27 -0500 Subject: [PATCH 05/14] fix: fixing stream for mistral --- examples/mistralai/mistral-stream-async.py | 5 ++--- examples/mistralai/mistral-stream.py | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/examples/mistralai/mistral-stream-async.py b/examples/mistralai/mistral-stream-async.py index 55dd235..9857994 100644 --- a/examples/mistralai/mistral-stream-async.py +++ b/examples/mistralai/mistral-stream-async.py @@ -18,7 +18,6 @@ async def main( with scope3.trace() as tracer: chunk_count = 0 - content = "" async for chunk in await client.chat.stream_async( model=model, messages=[{"role": "user", "content": message}], @@ -26,9 +25,9 @@ async def main( temperature=temperature, ): chunk_count += 1 - content += chunk.data.choices[0].delta.content + print(chunk.data.choices[0].delta.content, end="", flush=True) + print() print(f"Chunk count: {chunk_count}") - print(f"Content: {content}") impact = tracer.impact() print(impact) print(f"Total Energy Wh: {impact.total_energy_wh}") diff --git a/examples/mistralai/mistral-stream.py b/examples/mistralai/mistral-stream.py index 509138e..0777f57 100644 --- a/examples/mistralai/mistral-stream.py +++ b/examples/mistralai/mistral-stream.py @@ -16,7 +16,6 @@ def main( with scope3.trace() as tracer: chunk_count = 0 - content = "" stream = client.chat.stream( model=model, messages=[{"role": "user", "content": message}], @@ -25,9 +24,9 @@ def main( ) for chunk in stream: chunk_count += 1 - content += chunk.data.choices[0].delta.content + print(chunk.data.choices[0].delta.content, end="", flush=True) + print() print(f"Chunk count: {chunk_count}") - print(f"Content: {content}") impact = tracer.impact() print(impact) print(f"Total Energy Wh: {impact.total_energy_wh}") From 5f35a553e709dce0840eb92f4166705c8d935272 Mon Sep 17 00:00:00 2001 From: Kevin Guevara Date: Wed, 5 Feb 2025 16:32:10 -0500 Subject: [PATCH 06/14] fix: fixing examples for openai --- examples/openai/README.md | 18 ++++++--- examples/openai/openai-async-speech.py | 37 ++++++++++++++++++ examples/openai/openai-async-stream-chat.py | 29 +++++--------- examples/openai/openai-async-transcription.py | 38 +++++++++++++++++++ examples/openai/openai-speech.py | 35 +++++++++++++++++ ...{openai-stt.py => openai-transcription.py} | 2 +- scope3ai/tracers/openai/chat.py | 2 +- scope3ai/tracers/openai/speech_to_text.py | 2 +- scope3ai/tracers/openai/text_to_image.py | 5 ++- scope3ai/tracers/openai/text_to_speech.py | 2 +- scope3ai/tracers/openai/translation.py | 2 +- 11 files changed, 140 insertions(+), 32 deletions(-) create mode 100644 examples/openai/openai-async-speech.py create mode 100644 examples/openai/openai-async-transcription.py create mode 100644 examples/openai/openai-speech.py rename examples/openai/{openai-stt.py => 
openai-transcription.py} (92%) diff --git a/examples/openai/README.md b/examples/openai/README.md index 3208d12..fe2d39c 100644 --- a/examples/openai/README.md +++ b/examples/openai/README.md @@ -24,20 +24,26 @@ This directory contains examples of using OpenAI's API with environmental impact ```bash # Basic chat -python openai-chat.py --model "gpt-4" --message "What is artificial intelligence?" --max-tokens 100 +uv run python -m examples.openai.openai-chat --model "gpt-4" --message "What is artificial intelligence?" # Async streaming chat -python openai-async-stream-chat.py --model "gpt-4" --message "Explain quantum computing" --stream +uv run python -m examples.openai.openai-async-stream-chat --model "gpt-4" --message "Explain quantum computing" # Generate an image -python openai-image.py --prompt "A beautiful sunset over mountains" --size "1024x1024" +uv run python -m examples.openai.openai-image --prompt "A beautiful sunset over mountains" --model "dall-e-2" --size "1024x1024" # Async image generation -python openai-async-image.py --prompt "A futuristic city" --model "dall-e-3" +uv run python -m examples.openai.openai-async-image --prompt "A futuristic city" --model "dall-e-2" --size "1024x1024" # Text to Speech -python openai-speech.py --text "Hello, welcome to the future!" --voice "alloy" --output speech.mp3 +uv run python -m examples.openai.openai-speech --text "Hello, welcome to the future!" --model "tts-1" --response-format "mp3" + +# Async Text to Speech +uv run python -m examples.openai.openai-async-speech --text "Hello, welcome to the future!" --model "tts-1" --response-format "mp3" # Audio Transcription -python openai-transcription.py --file "recording.mp3" --model "whisper-1" +uv run python -m examples.openai.openai-transcription --filename "recording.mp3" --model "whisper-1" + +# Async Audio Transcription +uv run python -m examples.openai.openai-async-transcription --filename "recording.mp3" --model "whisper-1" ``` \ No newline at end of file diff --git a/examples/openai/openai-async-speech.py b/examples/openai/openai-async-speech.py new file mode 100644 index 0000000..6ef6ca3 --- /dev/null +++ b/examples/openai/openai-async-speech.py @@ -0,0 +1,37 @@ +import asyncio + +from openai import AsyncOpenAI +from scope3ai import Scope3AI + + +async def main(text: str, model: str, response_format: str): + client = AsyncOpenAI() + scope3 = Scope3AI.init() + + with scope3.trace() as tracer: + response = await client.audio.speech.create( + model=model, + voice="alloy", + input=text, + response_format=response_format, + ) + print(response) + print(response.scope3ai.request.model_dump(mode="json")) + impact = tracer.impact() + print(impact) + print(f"Total Energy Wh: {impact.total_energy_wh}") + print(f"Total GCO2e: {impact.total_gco2e}") + print(f"Total MLH2O: {impact.total_mlh2o}") + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description="OpenAI Speech to Text") + parser.add_argument("--model", type=str, default="whisper-1", help="Model") + parser.add_argument( + "--response-format", type=str, default="json", help="Response format" + ) + parser.add_argument("--text", type=str, help="The text to convert to speech") + args = parser.parse_args() + asyncio.run(main(**vars(args))) diff --git a/examples/openai/openai-async-stream-chat.py b/examples/openai/openai-async-stream-chat.py index 31ef51f..bddd78d 100644 --- a/examples/openai/openai-async-stream-chat.py +++ b/examples/openai/openai-async-stream-chat.py @@ -5,26 +5,24 @@ from scope3ai import 
Scope3AI -async def main(model: str, message: str, role: str, stream: bool): +async def main(model: str, message: str, role: str): client = AsyncOpenAI() scope3 = Scope3AI.init() with scope3.trace() as tracer: + chunk_count = 0 response = await client.chat.completions.create( model=model, messages=[{"role": role, "content": message}], - stream=stream, + stream=True, ) - - if stream: - async for event in response: - if not event.choices: - continue - print(event.choices[0].delta.content, end="", flush=True) - print() - else: - print(response.choices[0].message.content.strip()) - + async for event in response: + chunk_count += 1 + if not event.choices: + continue + print(event.choices[0].delta.content, end="", flush=True) + print() + print(f"Chunk count: {chunk_count}") impact = tracer.impact() print(impact) print(f"Total Energy Wh: {impact.total_energy_wh}") @@ -56,12 +54,5 @@ async def main(model: str, message: str, role: str, stream: bool): default="user", help="Role for the message (user, system, or assistant)", ) - parser.add_argument( - "--no-stream", - action="store_false", - dest="stream", - help="Disable streaming mode", - default=True, - ) args = parser.parse_args() asyncio.run(main(**vars(args))) diff --git a/examples/openai/openai-async-transcription.py b/examples/openai/openai-async-transcription.py new file mode 100644 index 0000000..114a847 --- /dev/null +++ b/examples/openai/openai-async-transcription.py @@ -0,0 +1,38 @@ +from pathlib import Path +import asyncio + +from openai import AsyncOpenAI +from scope3ai import Scope3AI + + +async def main(filename: Path, model: str, response_format: str): + client = AsyncOpenAI() + scope3 = Scope3AI.init() + + with scope3.trace() as tracer: + response = await client.audio.transcriptions.create( + model=model, + file=filename, + response_format=response_format, + ) + print(response) + print(response.scope3ai.request.model_dump(mode="json")) + print("---------------") + impact = tracer.impact() + print(impact) + print(f"Total Energy Wh: {impact.total_energy_wh}") + print(f"Total GCO2e: {impact.total_gco2e}") + print(f"Total MLH2O: {impact.total_mlh2o}") + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description="OpenAI Speech to Text") + parser.add_argument("--model", type=str, default="whisper-1", help="Model") + parser.add_argument( + "--response_format", type=str, default="json", help="Response format" + ) + parser.add_argument("--filename", type=Path, help="The path to the input file") + args = parser.parse_args() + asyncio.run(main(**vars(args))) diff --git a/examples/openai/openai-speech.py b/examples/openai/openai-speech.py new file mode 100644 index 0000000..5eb3f81 --- /dev/null +++ b/examples/openai/openai-speech.py @@ -0,0 +1,35 @@ +from openai import OpenAI +from scope3ai import Scope3AI + + +def main(text: str, model: str, response_format: str): + client = OpenAI() + scope3 = Scope3AI.init() + + with scope3.trace() as tracer: + response = client.audio.speech.create( + model=model, + voice="alloy", + input=text, + response_format=response_format, + ) + print(response) + print(response.scope3ai.request) + impact = tracer.impact() + print(impact) + print(f"Total Energy Wh: {impact.total_energy_wh}") + print(f"Total GCO2e: {impact.total_gco2e}") + print(f"Total MLH2O: {impact.total_mlh2o}") + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description="OpenAI Speech to Text") + parser.add_argument("--model", type=str, default="whisper-1", help="Model") 
+ parser.add_argument( + "--response-format", type=str, default="json", help="Response format" + ) + parser.add_argument("--text", type=str, help="The text to convert to speech") + args = parser.parse_args() + main(**vars(args)) diff --git a/examples/openai/openai-stt.py b/examples/openai/openai-transcription.py similarity index 92% rename from examples/openai/openai-stt.py rename to examples/openai/openai-transcription.py index 3fb1248..50df34f 100644 --- a/examples/openai/openai-stt.py +++ b/examples/openai/openai-transcription.py @@ -31,6 +31,6 @@ def main(filename: Path, model: str, response_format: str): parser.add_argument( "--response_format", type=str, default="json", help="Response format" ) - parser.add_argument("filename", type=Path, help="The path to the input file") + parser.add_argument("--filename", type=Path, help="The path to the input file") args = parser.parse_args() main(**vars(args)) diff --git a/scope3ai/tracers/openai/chat.py b/scope3ai/tracers/openai/chat.py index 0bd29c6..112288d 100644 --- a/scope3ai/tracers/openai/chat.py +++ b/scope3ai/tracers/openai/chat.py @@ -17,7 +17,7 @@ ) PROVIDER = PROVIDERS.OPENAI.value - +PROVIDER = "" logger = logging.getLogger("scope3ai.tracers.openai.chat") diff --git a/scope3ai/tracers/openai/speech_to_text.py b/scope3ai/tracers/openai/speech_to_text.py index 0af5fb1..b231ef9 100644 --- a/scope3ai/tracers/openai/speech_to_text.py +++ b/scope3ai/tracers/openai/speech_to_text.py @@ -16,7 +16,7 @@ from scope3ai.tracers.utils.audio import _get_file_audio_duration PROVIDER = PROVIDERS.OPENAI.value - +PROVIDER = "" logger = logging.getLogger("scope3.tracers.openai.speech_to_text") diff --git a/scope3ai/tracers/openai/text_to_image.py b/scope3ai/tracers/openai/text_to_image.py index ba4de5f..21dc5fc 100644 --- a/scope3ai/tracers/openai/text_to_image.py +++ b/scope3ai/tracers/openai/text_to_image.py @@ -3,13 +3,14 @@ from openai.resources.images import AsyncImages, Images from openai.types.images_response import ImagesResponse as _ImageResponse - +from scope3ai.constants import PROVIDERS from scope3ai.api.types import ImpactRow, Scope3AIContext, Task from scope3ai.api.typesgen import Image as RootImage from scope3ai.lib import Scope3AI from scope3ai.tracers.openai.utils import BaseModelResponse -PROVIDER = "openai" +PROVIDER = PROVIDERS.OPENAI.value +PROVIDER = "" DEFAULT_MODEL = "dall-e-2" DEFAULT_SIZE = "1024x1024" DEFAULT_N = 1 diff --git a/scope3ai/tracers/openai/text_to_speech.py b/scope3ai/tracers/openai/text_to_speech.py index ff10bee..6598746 100644 --- a/scope3ai/tracers/openai/text_to_speech.py +++ b/scope3ai/tracers/openai/text_to_speech.py @@ -12,7 +12,7 @@ from scope3ai.tracers.utils.audio import _get_audio_duration PROVIDER = PROVIDERS.OPENAI.value - +PROVIDER = "" logger = logging.getLogger(f"scope3ai.tracers.{__name__}") diff --git a/scope3ai/tracers/openai/translation.py b/scope3ai/tracers/openai/translation.py index c56449d..66fbecf 100644 --- a/scope3ai/tracers/openai/translation.py +++ b/scope3ai/tracers/openai/translation.py @@ -16,7 +16,7 @@ from scope3ai.tracers.utils.audio import _get_file_audio_duration PROVIDER = PROVIDERS.OPENAI.value - +PROVIDER = "" logger = logging.getLogger(__name__) From 26c524fc8b75671432ce8c6c3481174581a7a1a2 Mon Sep 17 00:00:00 2001 From: Kevin Guevara Date: Wed, 5 Feb 2025 17:06:09 -0500 Subject: [PATCH 07/14] fix: fixing openai tests --- tests/test_litellm_multimodal_output.py | 4 ++-- tests/test_litellm_multimodal_use_default.py | 13 ++++++------- 
tests/test_litellm_tracer_use_default.py | 12 ++++++------ tests/test_openai_multimodal_output.py | 3 +-- 4 files changed, 15 insertions(+), 17 deletions(-) diff --git a/tests/test_litellm_multimodal_output.py b/tests/test_litellm_multimodal_output.py index 7ad735f..5d33d9a 100644 --- a/tests/test_litellm_multimodal_output.py +++ b/tests/test_litellm_multimodal_output.py @@ -16,7 +16,7 @@ def test_litellm_multimodal_output_openai(tracer_with_sync_init, audio_format): ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value assert response.scope3ai.request.input_tokens == 17 assert response.scope3ai.request.output_tokens > 0 assert response.scope3ai.request.output_audio_seconds > 0 @@ -72,7 +72,7 @@ async def test_litellm_multimodal_output_openai_async( ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value assert response.scope3ai.request.input_tokens == 17 assert response.scope3ai.request.output_tokens > 0 assert response.scope3ai.request.output_audio_seconds > 0 diff --git a/tests/test_litellm_multimodal_use_default.py b/tests/test_litellm_multimodal_use_default.py index b1d9ceb..1463b28 100644 --- a/tests/test_litellm_multimodal_use_default.py +++ b/tests/test_litellm_multimodal_use_default.py @@ -2,7 +2,6 @@ import litellm from scope3ai.api.typesgen import Image -from scope3ai.constants import PROVIDERS from tests.utils import ( load_image_b64, TEST_IMAGE_PNG, @@ -41,7 +40,7 @@ def test_litellm_multimodal_vision_openai(tracer_with_sync_init): ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value assert response.scope3ai.request.input_tokens == 872 assert response.scope3ai.request.output_tokens == 59 assert response.scope3ai.request.input_images == [Image(root="1024x1024")] @@ -87,7 +86,7 @@ def test_litellm_multimodal_vision_2_images_openai(tracer_with_sync_init): ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value assert response.scope3ai.request.input_tokens == 1082 assert response.scope3ai.request.output_tokens == 54 assert response.scope3ai.request.input_images == [ @@ -129,7 +128,7 @@ def test_litellm_multimodal_audio_openai(tracer_with_sync_init): ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value assert response.scope3ai.request.input_tokens == 29 assert response.scope3ai.request.output_tokens == 10 assert response.scope3ai.request.input_audio_seconds >= 1 @@ -175,7 +174,7 @@ def test_litellm_multimodal_audio_2_openai(tracer_with_sync_init): ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + # assert 
response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value assert response.scope3ai.request.input_tokens == 46 assert response.scope3ai.request.output_tokens == 35 assert response.scope3ai.request.input_audio_seconds >= 1 @@ -214,7 +213,7 @@ def test_litellm_multimodal_vision_mistralai(tracer_with_sync_init): ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value assert response.scope3ai.request.input_tokens == 4172 assert response.scope3ai.request.output_tokens == 81 assert response.scope3ai.request.input_images == [Image(root="1024x1024")] @@ -260,7 +259,7 @@ def test_litellm_multimodal_vision_2_images_mistralai(tracer_with_sync_init): ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value assert response.scope3ai.request.input_tokens == 5228 assert response.scope3ai.request.output_tokens == 109 assert response.scope3ai.request.input_images == [ diff --git a/tests/test_litellm_tracer_use_default.py b/tests/test_litellm_tracer_use_default.py index 16c8315..db2723b 100644 --- a/tests/test_litellm_tracer_use_default.py +++ b/tests/test_litellm_tracer_use_default.py @@ -100,7 +100,7 @@ def test_litellm_image_generation(tracer_with_sync_init): assert response assert len(response.data) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value assert response.scope3ai.request is not None assert response.scope3ai.request.request_duration_ms > 0 assert response.scope3ai.impact is not None @@ -123,7 +123,7 @@ async def test_litellm_async_image_generation(tracer_with_sync_init): assert response assert len(response.data) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value assert response.scope3ai.request is not None assert response.scope3ai.request.request_duration_ms > 0 assert response.scope3ai.impact is not None @@ -150,7 +150,7 @@ def test_litellm_speech_to_text(tracer_with_sync_init): assert response.text is not None assert len(response.text) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value assert response.scope3ai.request is not None assert response.scope3ai.request.output_tokens == 2 assert response.scope3ai.request.request_duration_ms > 0 @@ -177,7 +177,7 @@ async def test_litellm_async_speech_to_text(tracer_with_sync_init): assert response.text is not None assert len(response.text) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value assert response.scope3ai.request is not None assert response.scope3ai.request.output_tokens == 2 assert response.scope3ai.request.request_duration_ms > 0 @@ -202,7 +202,7 @@ def test_litellm_text_to_speech(tracer_with_sync_init): assert 
response.text is not None assert len(response.text) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value assert response.scope3ai.request is not None assert response.scope3ai.request.input_tokens == 12 assert response.scope3ai.request.request_duration_ms > 0 @@ -229,7 +229,7 @@ async def test_litellm_async_text_to_speech(tracer_with_sync_init): assert response.text is not None assert len(response.text) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value assert response.scope3ai.request is not None assert response.scope3ai.request.input_tokens == 12 assert response.scope3ai.request.request_duration_ms > 0 diff --git a/tests/test_openai_multimodal_output.py b/tests/test_openai_multimodal_output.py index 0bec5f0..6f1a895 100644 --- a/tests/test_openai_multimodal_output.py +++ b/tests/test_openai_multimodal_output.py @@ -1,5 +1,4 @@ import pytest -from scope3ai.constants import PROVIDERS @pytest.mark.vcr @@ -18,7 +17,7 @@ def test_openai_multimodal_output(tracer_with_sync_init, audio_format): ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value assert response.scope3ai.request.input_tokens == 17 assert response.scope3ai.request.output_tokens > 0 assert response.scope3ai.request.output_audio_seconds > 0 From 1c38cf81b5956b8e75727ecb3a32f8a40933b9c7 Mon Sep 17 00:00:00 2001 From: Kevin Guevara Date: Wed, 5 Feb 2025 17:08:10 -0500 Subject: [PATCH 08/14] fix: fixing openai tests --- examples/openai/openai-async-speech.py | 1 - examples/openai/openai-async-transcription.py | 1 - 2 files changed, 2 deletions(-) diff --git a/examples/openai/openai-async-speech.py b/examples/openai/openai-async-speech.py index 6ef6ca3..c3fc537 100644 --- a/examples/openai/openai-async-speech.py +++ b/examples/openai/openai-async-speech.py @@ -16,7 +16,6 @@ async def main(text: str, model: str, response_format: str): response_format=response_format, ) print(response) - print(response.scope3ai.request.model_dump(mode="json")) impact = tracer.impact() print(impact) print(f"Total Energy Wh: {impact.total_energy_wh}") diff --git a/examples/openai/openai-async-transcription.py b/examples/openai/openai-async-transcription.py index 114a847..745a615 100644 --- a/examples/openai/openai-async-transcription.py +++ b/examples/openai/openai-async-transcription.py @@ -16,7 +16,6 @@ async def main(filename: Path, model: str, response_format: str): response_format=response_format, ) print(response) - print(response.scope3ai.request.model_dump(mode="json")) print("---------------") impact = tracer.impact() print(impact) From 64d30d6a225d64fc795800fb319a5dbc4df6b07d Mon Sep 17 00:00:00 2001 From: Kevin Guevara Date: Wed, 5 Feb 2025 17:17:38 -0500 Subject: [PATCH 09/14] fix: fixing openai tests --- scope3ai/tracers/openai/chat.py | 3 +-- scope3ai/tracers/openai/speech_to_text.py | 3 +-- scope3ai/tracers/openai/text_to_image.py | 3 +-- scope3ai/tracers/openai/text_to_speech.py | 7 +++---- tests/test_litellm_multimodal_output.py | 4 ++++ tests/test_litellm_multimodal_use_default.py | 12 ++++++++++++ 
tests/test_litellm_tracer_use_default.py | 12 ++++++++++++ tests/test_openai_multimodal_output.py | 2 ++ 8 files changed, 36 insertions(+), 10 deletions(-) diff --git a/scope3ai/tracers/openai/chat.py b/scope3ai/tracers/openai/chat.py index 112288d..8c3d11c 100644 --- a/scope3ai/tracers/openai/chat.py +++ b/scope3ai/tracers/openai/chat.py @@ -9,14 +9,13 @@ from openai.types.chat import ChatCompletionChunk as _ChatCompletionChunk from scope3ai.api.types import ImpactRow, Scope3AIContext -from scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI from scope3ai.tracers.utils.multimodal import ( aggregate_multimodal, aggregate_multimodal_audio_content_output, ) -PROVIDER = PROVIDERS.OPENAI.value +# TODO: PROVIDER = PROVIDERS.OPENAI.value for now AiApi does not support it PROVIDER = "" logger = logging.getLogger("scope3ai.tracers.openai.chat") diff --git a/scope3ai/tracers/openai/speech_to_text.py b/scope3ai/tracers/openai/speech_to_text.py index b231ef9..bbbf4b6 100644 --- a/scope3ai/tracers/openai/speech_to_text.py +++ b/scope3ai/tracers/openai/speech_to_text.py @@ -10,12 +10,11 @@ ) from scope3ai.api.types import ImpactRow, Scope3AIContext, Task -from scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI from scope3ai.tracers.openai.utils import BaseModelResponse from scope3ai.tracers.utils.audio import _get_file_audio_duration -PROVIDER = PROVIDERS.OPENAI.value +# TODO: PROVIDER = PROVIDERS.OPENAI.value for now AiApi does not support it PROVIDER = "" logger = logging.getLogger("scope3.tracers.openai.speech_to_text") diff --git a/scope3ai/tracers/openai/text_to_image.py b/scope3ai/tracers/openai/text_to_image.py index 21dc5fc..45fcecf 100644 --- a/scope3ai/tracers/openai/text_to_image.py +++ b/scope3ai/tracers/openai/text_to_image.py @@ -3,13 +3,12 @@ from openai.resources.images import AsyncImages, Images from openai.types.images_response import ImagesResponse as _ImageResponse -from scope3ai.constants import PROVIDERS from scope3ai.api.types import ImpactRow, Scope3AIContext, Task from scope3ai.api.typesgen import Image as RootImage from scope3ai.lib import Scope3AI from scope3ai.tracers.openai.utils import BaseModelResponse -PROVIDER = PROVIDERS.OPENAI.value +# TODO: PROVIDER = PROVIDERS.OPENAI.value for now AiApi does not support it PROVIDER = "" DEFAULT_MODEL = "dall-e-2" DEFAULT_SIZE = "1024x1024" diff --git a/scope3ai/tracers/openai/text_to_speech.py b/scope3ai/tracers/openai/text_to_speech.py index 6598746..d73b8cf 100644 --- a/scope3ai/tracers/openai/text_to_speech.py +++ b/scope3ai/tracers/openai/text_to_speech.py @@ -1,17 +1,16 @@ import logging import time -from typing import Any, Callable, Optional +from typing import Any, Callable, Optional, Tuple import tiktoken from openai.resources.audio.speech import AsyncSpeech, Speech, _legacy_response from scope3ai.api.types import ImpactRow, Scope3AIContext, Task -from scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI from scope3ai.tracers.openai.utils import BaseModelResponse from scope3ai.tracers.utils.audio import _get_audio_duration -PROVIDER = PROVIDERS.OPENAI.value +# TODO: PROVIDER = PROVIDERS.OPENAI.value for now AiApi does not support it PROVIDER = "" logger = logging.getLogger(f"scope3ai.tracers.{__name__}") @@ -26,7 +25,7 @@ def _openai_text_to_speech_get_impact_row( response: _legacy_response.HttpxBinaryResponseContent, request_latency: float, kwargs: Any, -) -> (HttpxBinaryResponseContent, ImpactRow): +) -> Tuple[HttpxBinaryResponseContent, ImpactRow]: # try getting 
duration response_format = kwargs.get("response_format", "mp3") duration = _get_audio_duration(response_format, response.content) diff --git a/tests/test_litellm_multimodal_output.py b/tests/test_litellm_multimodal_output.py index 5d33d9a..02c9879 100644 --- a/tests/test_litellm_multimodal_output.py +++ b/tests/test_litellm_multimodal_output.py @@ -16,7 +16,9 @@ def test_litellm_multimodal_output_openai(tracer_with_sync_init, audio_format): ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None + # TODO: Add this assert when AiApi support it # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + assert response.scope3ai.request.input_tokens == 17 assert response.scope3ai.request.output_tokens > 0 assert response.scope3ai.request.output_audio_seconds > 0 @@ -72,7 +74,9 @@ async def test_litellm_multimodal_output_openai_async( ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None + # TODO: Add this assert when AiApi support it # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + assert response.scope3ai.request.input_tokens == 17 assert response.scope3ai.request.output_tokens > 0 assert response.scope3ai.request.output_audio_seconds > 0 diff --git a/tests/test_litellm_multimodal_use_default.py b/tests/test_litellm_multimodal_use_default.py index 1463b28..1253242 100644 --- a/tests/test_litellm_multimodal_use_default.py +++ b/tests/test_litellm_multimodal_use_default.py @@ -40,7 +40,9 @@ def test_litellm_multimodal_vision_openai(tracer_with_sync_init): ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None + # TODO: Add this assert when AiApi support it # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + assert response.scope3ai.request.input_tokens == 872 assert response.scope3ai.request.output_tokens == 59 assert response.scope3ai.request.input_images == [Image(root="1024x1024")] @@ -86,7 +88,9 @@ def test_litellm_multimodal_vision_2_images_openai(tracer_with_sync_init): ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None + # TODO: Add this assert when AiApi support it # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + assert response.scope3ai.request.input_tokens == 1082 assert response.scope3ai.request.output_tokens == 54 assert response.scope3ai.request.input_images == [ @@ -128,7 +132,9 @@ def test_litellm_multimodal_audio_openai(tracer_with_sync_init): ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None + # TODO: Add this assert when AiApi support it # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + assert response.scope3ai.request.input_tokens == 29 assert response.scope3ai.request.output_tokens == 10 assert response.scope3ai.request.input_audio_seconds >= 1 @@ -174,7 +180,9 @@ def test_litellm_multimodal_audio_2_openai(tracer_with_sync_init): ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None + # TODO: Add this assert when AiApi support it # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + assert response.scope3ai.request.input_tokens == 46 assert response.scope3ai.request.output_tokens == 35 assert response.scope3ai.request.input_audio_seconds >= 1 @@ -213,7 +221,9 @@ def test_litellm_multimodal_vision_mistralai(tracer_with_sync_init): ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None + # 
TODO: Add this assert when AiApi support it # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + assert response.scope3ai.request.input_tokens == 4172 assert response.scope3ai.request.output_tokens == 81 assert response.scope3ai.request.input_images == [Image(root="1024x1024")] @@ -259,7 +269,9 @@ def test_litellm_multimodal_vision_2_images_mistralai(tracer_with_sync_init): ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None + # TODO: Add this assert when AiApi support it # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + assert response.scope3ai.request.input_tokens == 5228 assert response.scope3ai.request.output_tokens == 109 assert response.scope3ai.request.input_images == [ diff --git a/tests/test_litellm_tracer_use_default.py b/tests/test_litellm_tracer_use_default.py index db2723b..c6e0100 100644 --- a/tests/test_litellm_tracer_use_default.py +++ b/tests/test_litellm_tracer_use_default.py @@ -100,7 +100,9 @@ def test_litellm_image_generation(tracer_with_sync_init): assert response assert len(response.data) > 0 assert getattr(response, "scope3ai") is not None + # TODO: Add this assert when AiApi support it # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + assert response.scope3ai.request is not None assert response.scope3ai.request.request_duration_ms > 0 assert response.scope3ai.impact is not None @@ -123,7 +125,9 @@ async def test_litellm_async_image_generation(tracer_with_sync_init): assert response assert len(response.data) > 0 assert getattr(response, "scope3ai") is not None + # TODO: Add this assert when AiApi support it # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + assert response.scope3ai.request is not None assert response.scope3ai.request.request_duration_ms > 0 assert response.scope3ai.impact is not None @@ -150,7 +154,9 @@ def test_litellm_speech_to_text(tracer_with_sync_init): assert response.text is not None assert len(response.text) > 0 assert getattr(response, "scope3ai") is not None + # TODO: Add this assert when AiApi support it # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + assert response.scope3ai.request is not None assert response.scope3ai.request.output_tokens == 2 assert response.scope3ai.request.request_duration_ms > 0 @@ -177,7 +183,9 @@ async def test_litellm_async_speech_to_text(tracer_with_sync_init): assert response.text is not None assert len(response.text) > 0 assert getattr(response, "scope3ai") is not None + # TODO: Add this assert when AiApi support it # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + assert response.scope3ai.request is not None assert response.scope3ai.request.output_tokens == 2 assert response.scope3ai.request.request_duration_ms > 0 @@ -202,7 +210,9 @@ def test_litellm_text_to_speech(tracer_with_sync_init): assert response.text is not None assert len(response.text) > 0 assert getattr(response, "scope3ai") is not None + # TODO: Add this assert when AiApi support it # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + assert response.scope3ai.request is not None assert response.scope3ai.request.input_tokens == 12 assert response.scope3ai.request.request_duration_ms > 0 @@ -229,7 +239,9 @@ async def test_litellm_async_text_to_speech(tracer_with_sync_init): assert response.text is not None assert len(response.text) > 0 assert getattr(response, "scope3ai") is not None + # TODO: Add 
this assert when AiApi support it # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + assert response.scope3ai.request is not None assert response.scope3ai.request.input_tokens == 12 assert response.scope3ai.request.request_duration_ms > 0 diff --git a/tests/test_openai_multimodal_output.py b/tests/test_openai_multimodal_output.py index 6f1a895..de593b1 100644 --- a/tests/test_openai_multimodal_output.py +++ b/tests/test_openai_multimodal_output.py @@ -17,7 +17,9 @@ def test_openai_multimodal_output(tracer_with_sync_init, audio_format): ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None + # TODO: Add this assert when AiApi support it # assert response.scope3ai.request.managed_service_id == PROVIDERS.OPENAI.value + assert response.scope3ai.request.input_tokens == 17 assert response.scope3ai.request.output_tokens > 0 assert response.scope3ai.request.output_audio_seconds > 0 From cad0d8687bef60322c88f4099616a890c63ae453 Mon Sep 17 00:00:00 2001 From: Kevin Guevara Date: Wed, 5 Feb 2025 17:19:05 -0500 Subject: [PATCH 10/14] fix: fixing openai tests --- scope3ai/tracers/openai/translation.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/scope3ai/tracers/openai/translation.py b/scope3ai/tracers/openai/translation.py index 66fbecf..6a1c8cb 100644 --- a/scope3ai/tracers/openai/translation.py +++ b/scope3ai/tracers/openai/translation.py @@ -10,12 +10,11 @@ ) from scope3ai.api.types import ImpactRow, Scope3AIContext, Task -from scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI from scope3ai.tracers.openai.utils import BaseModelResponse from scope3ai.tracers.utils.audio import _get_file_audio_duration -PROVIDER = PROVIDERS.OPENAI.value +# TODO: PROVIDER = PROVIDERS.OPENAI.value for now AiApi does not support it PROVIDER = "" logger = logging.getLogger(__name__) From c4817dbc26303414ca97f5efa895fb1e84ad145e Mon Sep 17 00:00:00 2001 From: Kevin Guevara Date: Wed, 5 Feb 2025 17:29:00 -0500 Subject: [PATCH 11/14] fix: fix managed_service_id --- scope3ai/lib.py | 1 + scope3ai/tracers/openai/chat.py | 5 +++-- scope3ai/tracers/openai/speech_to_text.py | 8 ++++---- scope3ai/tracers/openai/text_to_image.py | 8 ++++---- scope3ai/tracers/openai/text_to_speech.py | 4 ++-- scope3ai/tracers/openai/translation.py | 4 ++-- 6 files changed, 16 insertions(+), 14 deletions(-) diff --git a/scope3ai/lib.py b/scope3ai/lib.py index 35dc4a0..a79cee5 100644 --- a/scope3ai/lib.py +++ b/scope3ai/lib.py @@ -423,6 +423,7 @@ def set_only_if(row, field, *values): return row.request_id = generate_id() + row.managed_service_id = "" if root_tracer: set_only_if(row, "trace_id", root_tracer.trace_id) if row.utc_datetime is None: diff --git a/scope3ai/tracers/openai/chat.py b/scope3ai/tracers/openai/chat.py index 8c3d11c..0bd29c6 100644 --- a/scope3ai/tracers/openai/chat.py +++ b/scope3ai/tracers/openai/chat.py @@ -9,14 +9,15 @@ from openai.types.chat import ChatCompletionChunk as _ChatCompletionChunk from scope3ai.api.types import ImpactRow, Scope3AIContext +from scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI from scope3ai.tracers.utils.multimodal import ( aggregate_multimodal, aggregate_multimodal_audio_content_output, ) -# TODO: PROVIDER = PROVIDERS.OPENAI.value for now AiApi does not support it -PROVIDER = "" +PROVIDER = PROVIDERS.OPENAI.value + logger = logging.getLogger("scope3ai.tracers.openai.chat") diff --git a/scope3ai/tracers/openai/speech_to_text.py b/scope3ai/tracers/openai/speech_to_text.py index 
bbbf4b6..b8f5042 100644 --- a/scope3ai/tracers/openai/speech_to_text.py +++ b/scope3ai/tracers/openai/speech_to_text.py @@ -1,6 +1,6 @@ import logging import time -from typing import Any, Callable, Optional, Union +from typing import Any, Callable, Optional, Tuple, Union import tiktoken from openai.resources.audio.transcriptions import AsyncTranscriptions, Transcriptions @@ -10,12 +10,12 @@ ) from scope3ai.api.types import ImpactRow, Scope3AIContext, Task +from scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI from scope3ai.tracers.openai.utils import BaseModelResponse from scope3ai.tracers.utils.audio import _get_file_audio_duration -# TODO: PROVIDER = PROVIDERS.OPENAI.value for now AiApi does not support it -PROVIDER = "" +PROVIDER = PROVIDERS.OPENAI.value logger = logging.getLogger("scope3.tracers.openai.speech_to_text") @@ -33,7 +33,7 @@ class TranscriptionVerbose(BaseModelResponse, _TranscriptionVerbose): def _openai_speech_to_text_get_impact_row( response: Any, request_latency: float, kwargs: dict -) -> (Union[Transcription, TranscriptionVerbose, str], ImpactRow): +) -> Tuple[Union[Transcription, TranscriptionVerbose, str], ImpactRow]: model = kwargs["model"] encoder = tiktoken.get_encoding("cl100k_base") diff --git a/scope3ai/tracers/openai/text_to_image.py b/scope3ai/tracers/openai/text_to_image.py index 45fcecf..67050a8 100644 --- a/scope3ai/tracers/openai/text_to_image.py +++ b/scope3ai/tracers/openai/text_to_image.py @@ -1,15 +1,15 @@ import time -from typing import Any, Callable, Optional +from typing import Any, Callable, Optional, Tuple from openai.resources.images import AsyncImages, Images from openai.types.images_response import ImagesResponse as _ImageResponse from scope3ai.api.types import ImpactRow, Scope3AIContext, Task from scope3ai.api.typesgen import Image as RootImage +from scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI from scope3ai.tracers.openai.utils import BaseModelResponse -# TODO: PROVIDER = PROVIDERS.OPENAI.value for now AiApi does not support it -PROVIDER = "" +PROVIDER = PROVIDERS.OPENAI.value DEFAULT_MODEL = "dall-e-2" DEFAULT_SIZE = "1024x1024" DEFAULT_N = 1 @@ -21,7 +21,7 @@ class ImageResponse(BaseModelResponse, _ImageResponse): def _openai_image_get_impact_row( response: _ImageResponse, request_latency: float, **kwargs: Any -) -> (ImageResponse, ImpactRow): +) -> Tuple[ImageResponse, ImpactRow]: model = kwargs.get("model", DEFAULT_MODEL) size = RootImage(root=kwargs.get("size", DEFAULT_SIZE)) n = kwargs.get("n", DEFAULT_N) diff --git a/scope3ai/tracers/openai/text_to_speech.py b/scope3ai/tracers/openai/text_to_speech.py index d73b8cf..cbfd51e 100644 --- a/scope3ai/tracers/openai/text_to_speech.py +++ b/scope3ai/tracers/openai/text_to_speech.py @@ -6,12 +6,12 @@ from openai.resources.audio.speech import AsyncSpeech, Speech, _legacy_response from scope3ai.api.types import ImpactRow, Scope3AIContext, Task +from scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI from scope3ai.tracers.openai.utils import BaseModelResponse from scope3ai.tracers.utils.audio import _get_audio_duration -# TODO: PROVIDER = PROVIDERS.OPENAI.value for now AiApi does not support it -PROVIDER = "" +PROVIDER = PROVIDERS.OPENAI.value logger = logging.getLogger(f"scope3ai.tracers.{__name__}") diff --git a/scope3ai/tracers/openai/translation.py b/scope3ai/tracers/openai/translation.py index 6a1c8cb..e6427e4 100644 --- a/scope3ai/tracers/openai/translation.py +++ b/scope3ai/tracers/openai/translation.py @@ -10,12 +10,12 @@ ) 
from scope3ai.api.types import ImpactRow, Scope3AIContext, Task +from scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI from scope3ai.tracers.openai.utils import BaseModelResponse from scope3ai.tracers.utils.audio import _get_file_audio_duration -# TODO: PROVIDER = PROVIDERS.OPENAI.value for now AiApi does not support it -PROVIDER = "" +PROVIDER = PROVIDERS.OPENAI.value logger = logging.getLogger(__name__) From 757c5a4553527e5543fae103a29823c990018106 Mon Sep 17 00:00:00 2001 From: Kevin Guevara Date: Wed, 5 Feb 2025 17:34:24 -0500 Subject: [PATCH 12/14] fix: fix managed_service_id --- examples/openai/openai-async-transcription.py | 1 - tests/test_litellm_multimodal.py | 19 ++++++++++++------- tests/test_litellm_multimodal_output.py | 7 ++++--- tests/test_litellm_tracer.py | 19 ++++++++++++------- tests/test_litellm_tracer_use_default.py | 7 ++++--- 5 files changed, 32 insertions(+), 21 deletions(-) diff --git a/examples/openai/openai-async-transcription.py b/examples/openai/openai-async-transcription.py index 745a615..ef36326 100644 --- a/examples/openai/openai-async-transcription.py +++ b/examples/openai/openai-async-transcription.py @@ -16,7 +16,6 @@ async def main(filename: Path, model: str, response_format: str): response_format=response_format, ) print(response) - print("---------------") impact = tracer.impact() print(impact) print(f"Total Energy Wh: {impact.total_energy_wh}") diff --git a/tests/test_litellm_multimodal.py b/tests/test_litellm_multimodal.py index 2a455cb..b14667b 100644 --- a/tests/test_litellm_multimodal.py +++ b/tests/test_litellm_multimodal.py @@ -2,7 +2,6 @@ import litellm from scope3ai.api.typesgen import Image -from scope3ai.constants import PROVIDERS from tests.utils import ( load_image_b64, TEST_IMAGE_PNG, @@ -41,7 +40,8 @@ def test_litellm_multimodal_vision_openai(tracer_with_sync_init): ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value + # TODO: Add this assert when AiApi supports it + # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value assert response.scope3ai.request.input_tokens == 872 assert response.scope3ai.request.output_tokens == 931 assert response.scope3ai.request.input_images == [Image(root="1024x1024")] @@ -87,7 +87,8 @@ def test_litellm_multimodal_vision_2_images_openai(tracer_with_sync_init): ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value + # TODO: Add this assert when AiApi supports it + # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value assert response.scope3ai.request.input_tokens == 1082 assert response.scope3ai.request.output_tokens == 1136 assert response.scope3ai.request.input_images == [ @@ -129,7 +130,8 @@ def test_litellm_multimodal_audio_openai(tracer_with_sync_init): ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value + # TODO: Add this assert when AiApi supports it + # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value assert response.scope3ai.request.input_tokens == 29 assert response.scope3ai.request.output_tokens == 39 assert response.scope3ai.request.input_audio_seconds >= 1 @@ -175,7 +177,8 @@ def test_litellm_multimodal_audio_2_openai(tracer_with_sync_init): ) assert
len(response.choices) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value + # TODO: Add this assert when AiApi supports it + # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value assert response.scope3ai.request.input_tokens == 46 assert response.scope3ai.request.output_tokens == 81 assert response.scope3ai.request.input_audio_seconds >= 1 @@ -214,7 +217,8 @@ def test_litellm_multimodal_vision_mistralai(tracer_with_sync_init): ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value + # TODO: Add this assert when AiApi supports it + # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value assert response.scope3ai.request.input_tokens == 4172 assert response.scope3ai.request.output_tokens == 4253 assert response.scope3ai.request.input_images == [Image(root="1024x1024")] @@ -260,7 +264,8 @@ def test_litellm_multimodal_vision_2_images_mistralai(tracer_with_sync_init): ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value + # TODO: Add this assert when AiApi supports it + # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value assert response.scope3ai.request.input_tokens == 5228 assert response.scope3ai.request.output_tokens == 5337 assert response.scope3ai.request.input_images == [ diff --git a/tests/test_litellm_multimodal_output.py b/tests/test_litellm_multimodal_output.py index 02c9879..16ae2fc 100644 --- a/tests/test_litellm_multimodal_output.py +++ b/tests/test_litellm_multimodal_output.py @@ -1,6 +1,5 @@ import litellm import pytest -from scope3ai.constants import PROVIDERS @@ -45,7 +44,8 @@ def test_litellm_multimodal_output_default(tracer_with_sync_init, audio_format): ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value + # TODO: Add this assert when AiApi supports it + # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value assert response.scope3ai.request.input_tokens == 17 assert response.scope3ai.request.output_tokens > 0 assert response.scope3ai.request.output_audio_seconds > 0 @@ -106,7 +106,8 @@ async def test_litellm_multimodal_output_default_async( ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value + # TODO: Add this assert when AiApi supports it + # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value assert response.scope3ai.request.input_tokens == 17 assert response.scope3ai.request.output_tokens > 0 assert response.scope3ai.request.output_audio_seconds > 0 diff --git a/tests/test_litellm_tracer.py b/tests/test_litellm_tracer.py index 91eb75f..2090fa2 100644 --- a/tests/test_litellm_tracer.py +++ b/tests/test_litellm_tracer.py @@ -3,7 +3,6 @@ import litellm import pytest -from scope3ai.constants import PROVIDERS USE_ALWAYS_LITELLM_TRACER = True @@ -98,7 +97,8 @@ def test_litellm_image_generation(tracer_with_sync_init): assert response assert len(response.data) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value + # TODO: Add this assert
when AiApi supports it + # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value assert response.scope3ai.request is not None assert response.scope3ai.request.input_tokens == 8 assert response.scope3ai.request.request_duration_ms > 0 @@ -122,7 +122,8 @@ async def test_litellm_async_image_generation(tracer_with_sync_init): assert response assert len(response.data) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value + # TODO: Add this assert when AiApi supports it + # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value assert response.scope3ai.request is not None assert response.scope3ai.request.input_tokens == 6 assert response.scope3ai.request.request_duration_ms > 0 @@ -150,7 +151,8 @@ def test_litellm_speech_to_text(tracer_with_sync_init): assert response.text is not None assert len(response.text) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value + # TODO: Add this assert when AiApi supports it + # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value assert response.scope3ai.request is not None assert response.scope3ai.request.output_tokens == 2 assert response.scope3ai.request.request_duration_ms > 0 @@ -177,7 +179,8 @@ async def test_litellm_async_speech_to_text(tracer_with_sync_init): assert response.text is not None assert len(response.text) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value + # TODO: Add this assert when AiApi supports it + # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value assert response.scope3ai.request is not None assert response.scope3ai.request.output_tokens == 2 assert response.scope3ai.request.request_duration_ms > 0 @@ -202,7 +205,8 @@ def test_litellm_text_to_speech(tracer_with_sync_init): assert response.text is not None assert len(response.text) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value + # TODO: Add this assert when AiApi supports it + # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value assert response.scope3ai.request is not None assert response.scope3ai.request.input_tokens == 12 assert response.scope3ai.request.request_duration_ms > 0 @@ -229,7 +233,8 @@ async def test_litellm_async_text_to_speech(tracer_with_sync_init): assert response.text is not None assert len(response.text) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value + # TODO: Add this assert when AiApi supports it + # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value assert response.scope3ai.request is not None assert response.scope3ai.request.input_tokens == 12 assert response.scope3ai.request.request_duration_ms > 0 diff --git a/tests/test_litellm_tracer_use_default.py b/tests/test_litellm_tracer_use_default.py index c6e0100..a480102 100644 --- a/tests/test_litellm_tracer_use_default.py +++ b/tests/test_litellm_tracer_use_default.py @@ -3,7 +3,6 @@ import litellm import pytest -from scope3ai.constants import PROVIDERS USE_ALWAYS_LITELLM_TRACER = False @@ -18,7 +17,8 @@ def test_litellm_chat(tracer_with_sync_init): ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None - assert
response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value + # TODO: Add this assert when AiApi supports it + # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value assert response.scope3ai.request.input_tokens == 44 assert response.scope3ai.request.output_tokens == 69 assert response.scope3ai.impact is not None @@ -40,7 +40,8 @@ async def test_litellm_async_chat(tracer_with_sync_init): ) assert len(response.choices) > 0 assert getattr(response, "scope3ai") is not None - assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value + # TODO: Add this assert when AiApi supports it + # assert response.scope3ai.request.managed_service_id == PROVIDERS.LITELLM.value assert response.scope3ai.request.input_tokens == 3 assert response.scope3ai.impact is not None assert response.scope3ai.impact.total_impact is not None From c39c7f13bb11e00a8f0631f4cd840110a0ceb294 Mon Sep 17 00:00:00 2001 From: Kevin Guevara Date: Wed, 5 Feb 2025 17:56:49 -0500 Subject: [PATCH 13/14] fix: fix managed_service_id --- scope3ai/tracers/anthropic/chat.py | 6 ------ scope3ai/tracers/cohere/chat.py | 4 ---- scope3ai/tracers/cohere/chat_v2.py | 4 ---- scope3ai/tracers/google_genai/chat.py | 1 - scope3ai/tracers/huggingface/chat.py | 4 ---- scope3ai/tracers/huggingface/image_to_image.py | 1 - scope3ai/tracers/huggingface/speech_to_text.py | 1 - scope3ai/tracers/huggingface/text_to_image.py | 1 - scope3ai/tracers/huggingface/text_to_speech.py | 1 - scope3ai/tracers/huggingface/translation.py | 1 - scope3ai/tracers/huggingface/vision/image_classification.py | 1 - scope3ai/tracers/huggingface/vision/image_segmentation.py | 1 - scope3ai/tracers/huggingface/vision/object_detection.py | 1 - scope3ai/tracers/litellm/chat.py | 4 ---- scope3ai/tracers/litellm/speech_to_text.py | 1 - scope3ai/tracers/litellm/text_to_image.py | 1 - scope3ai/tracers/litellm/text_to_speech.py | 1 - scope3ai/tracers/mistralai/chat.py | 4 ---- scope3ai/tracers/openai/chat.py | 4 ---- scope3ai/tracers/openai/speech_to_text.py | 1 - scope3ai/tracers/openai/text_to_image.py | 1 - scope3ai/tracers/openai/text_to_speech.py | 1 - scope3ai/tracers/openai/translation.py | 1 - 23 files changed, 46 deletions(-) diff --git a/scope3ai/tracers/anthropic/chat.py b/scope3ai/tracers/anthropic/chat.py index 426ff99..8bf5bb5 100644 --- a/scope3ai/tracers/anthropic/chat.py +++ b/scope3ai/tracers/anthropic/chat.py @@ -63,7 +63,6 @@ def __stream_text__(self) -> Iterator[str]: # type: ignore[misc] input_tokens=input_tokens, output_tokens=output_tokens, request_duration_ms=requests_latency * 1000, - managed_service_id=PROVIDER, ) self.scope3ai = Scope3AI.get_instance().submit_impact(scope3_row) @@ -106,7 +105,6 @@ async def __stream_text__(self) -> AsyncIterator[str]: # type: ignore[misc] input_tokens=input_tokens, output_tokens=output_tokens, request_duration_ms=requests_latency * 1000, - managed_service_id=PROVIDER, ) self.scope3ai = await Scope3AI.get_instance().asubmit_impact(scope3_row) @@ -177,7 +175,6 @@ def __stream__(self) -> Iterator[_T]: input_tokens=input_tokens, output_tokens=output_tokens, request_duration_ms=request_latency * 1000, - managed_service_id=PROVIDER, ) self.scope3ai = Scope3AI.get_instance().submit_impact(scope3_row) @@ -210,7 +207,6 @@ async def __stream__(self) -> AsyncIterator[_T]: input_tokens=input_tokens, output_tokens=output_tokens, request_duration_ms=request_latency * 1000, - managed_service_id=PROVIDER, ) self.scope3ai = await Scope3AI.get_instance().asubmit_impact(scope3_row) @@ -229,7
+225,6 @@ def _anthropic_chat_wrapper(response: Message, request_latency: float) -> Messag input_tokens=response.usage.input_tokens, output_tokens=response.usage.output_tokens, request_duration_ms=request_latency * 1000, - managed_service_id=PROVIDER, ) scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row) if scope3ai_ctx is not None: @@ -263,7 +258,6 @@ async def _anthropic_async_chat_wrapper( input_tokens=response.usage.input_tokens, output_tokens=response.usage.output_tokens, request_duration_ms=request_latency * 1000, - managed_service_id=PROVIDER, ) scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row) if scope3ai_ctx is not None: diff --git a/scope3ai/tracers/cohere/chat.py b/scope3ai/tracers/cohere/chat.py index 6e3e9c8..67f6e33 100644 --- a/scope3ai/tracers/cohere/chat.py +++ b/scope3ai/tracers/cohere/chat.py @@ -47,7 +47,6 @@ def cohere_chat_wrapper( input_tokens=response.meta.tokens.input_tokens, output_tokens=response.meta.tokens.output_tokens, request_duration_ms=request_latency * 1000, - managed_service_id=PROVIDER, ) scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row) return NonStreamedChatResponse(**response.dict(), scope3ai=scope3ai_ctx) @@ -68,7 +67,6 @@ async def cohere_async_chat_wrapper( input_tokens=response.meta.tokens.input_tokens, output_tokens=response.meta.tokens.output_tokens, request_duration_ms=request_latency * 1000, - managed_service_id=PROVIDER, ) scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row) return NonStreamedChatResponse(**response.dict(), scope3ai=scope3ai_ctx) @@ -93,7 +91,6 @@ def cohere_stream_chat_wrapper( input_tokens=input_tokens, output_tokens=output_tokens, request_duration_ms=request_latency * 1000, - managed_service_id=PROVIDER, ) scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row) yield StreamEndStreamedChatResponse(**event.dict(), scope3ai=scope3ai_ctx) @@ -120,7 +117,6 @@ async def cohere_async_stream_chat_wrapper( input_tokens=input_tokens, output_tokens=output_tokens, request_duration_ms=request_latency * 1000, - managed_service_id=PROVIDER, ) scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row) yield StreamEndStreamedChatResponse(**event.dict(), scope3ai=scope3ai_ctx) diff --git a/scope3ai/tracers/cohere/chat_v2.py b/scope3ai/tracers/cohere/chat_v2.py index b7950c8..9996f69 100644 --- a/scope3ai/tracers/cohere/chat_v2.py +++ b/scope3ai/tracers/cohere/chat_v2.py @@ -50,7 +50,6 @@ def cohere_chat_v2_wrapper( input_tokens=response.usage.tokens.input_tokens, output_tokens=response.usage.tokens.output_tokens, request_duration_ms=request_latency * 1000, - managed_service_id=PROVIDER, ) scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row) return ChatResponse(**response.dict(), scope3ai=scope3ai_ctx) @@ -71,7 +70,6 @@ async def cohere_async_chat_v2_wrapper( input_tokens=response.usage.tokens.input_tokens, output_tokens=response.usage.tokens.output_tokens, request_duration_ms=request_latency * 1000, - managed_service_id=PROVIDER, ) scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row) return ChatResponse(**response.dict(), scope3ai=scope3ai_ctx) @@ -97,7 +95,6 @@ def cohere_stream_chat_v2_wrapper( input_tokens=input_tokens, output_tokens=output_tokens, request_duration_ms=request_latency * 1000, - managed_service_id=PROVIDER, ) scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row) yield Scope3AIStreamedChatResponseV2(type="scope3ai", scope3ai=scope3ai_ctx) @@ -123,7 +120,6 @@ async def 
cohere_async_stream_chat_v2_wrapper( input_tokens=input_tokens, output_tokens=output_tokens, request_duration_ms=request_latency * 1000, - managed_service_id=PROVIDER, ) scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row) yield Scope3AIStreamedChatResponseV2(type="scope3ai", scope3ai=scope3ai_ctx) diff --git a/scope3ai/tracers/google_genai/chat.py b/scope3ai/tracers/google_genai/chat.py index 12c9a45..ea97634 100644 --- a/scope3ai/tracers/google_genai/chat.py +++ b/scope3ai/tracers/google_genai/chat.py @@ -21,7 +21,6 @@ def get_impact_row(response: _GenerateContentResponse, duration_ms: float) -> Im input_tokens=response.usage_metadata.prompt_token_count, output_tokens=response.usage_metadata.candidates_token_count or 0, request_duration_ms=duration_ms * 1000, - managed_service_id=PROVIDER, ) diff --git a/scope3ai/tracers/huggingface/chat.py b/scope3ai/tracers/huggingface/chat.py index 3b09d2f..069130f 100644 --- a/scope3ai/tracers/huggingface/chat.py +++ b/scope3ai/tracers/huggingface/chat.py @@ -64,7 +64,6 @@ def huggingface_chat_wrapper_non_stream( input_tokens=response.usage.prompt_tokens, output_tokens=response.usage.completion_tokens, request_duration_ms=float(compute_time) * 1000, - managed_service_id=PROVIDER, ) scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row) chat = ChatCompletionOutput(**asdict(response)) @@ -90,7 +89,6 @@ def huggingface_chat_wrapper_stream( model_id=model, output_tokens=token_count, request_duration_ms=request_latency * 1000, - managed_service_id=PROVIDER, ) chunk_data = ChatCompletionStreamOutput(**asdict(chunk)) scope3_ctx = Scope3AI.get_instance().submit_impact(scope3_row) @@ -127,7 +125,6 @@ async def huggingface_async_chat_wrapper_non_stream( input_tokens=response.usage.prompt_tokens, output_tokens=output_tokens, request_duration_ms=request_latency * 1000, - managed_service_id=PROVIDER, ) scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row) @@ -152,7 +149,6 @@ async def huggingface_async_chat_wrapper_stream( output_tokens=token_count, request_duration_ms=request_latency * 1000, # TODO: can we get the header that has the processing time - managed_service_id=PROVIDER, ) scope3_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row) chunk_data = ChatCompletionStreamOutput(**asdict(chunk)) diff --git a/scope3ai/tracers/huggingface/image_to_image.py b/scope3ai/tracers/huggingface/image_to_image.py index 096a0b9..9631f8c 100644 --- a/scope3ai/tracers/huggingface/image_to_image.py +++ b/scope3ai/tracers/huggingface/image_to_image.py @@ -64,7 +64,6 @@ def _hugging_face_image_to_image_get_impact_row( input_tokens=int(input_tokens), task=Task.image_generation, request_duration_ms=float(compute_time) * 1000, - managed_service_id=PROVIDER, output_images=[RootImage(root=f"{output_width}x{output_height}")], input_images=input_images, ) diff --git a/scope3ai/tracers/huggingface/speech_to_text.py b/scope3ai/tracers/huggingface/speech_to_text.py index 7e24dad..ebab950 100644 --- a/scope3ai/tracers/huggingface/speech_to_text.py +++ b/scope3ai/tracers/huggingface/speech_to_text.py @@ -50,7 +50,6 @@ def _hugging_face_automatic_recognition_get_impact_row( task=Task.text_to_speech, input_audio_seconds=float(compute_audio_length), request_duration_ms=float(compute_time) * 1000, - managed_service_id=PROVIDER, ) result = AutomaticSpeechRecognitionOutput(**asdict(response)) return result, scope3_row diff --git a/scope3ai/tracers/huggingface/text_to_image.py b/scope3ai/tracers/huggingface/text_to_image.py index 
e0fb7d5..2b16f09 100644 --- a/scope3ai/tracers/huggingface/text_to_image.py +++ b/scope3ai/tracers/huggingface/text_to_image.py @@ -52,7 +52,6 @@ def _hugging_face_text_to_image_get_impact_row( task=Task.text_to_image, output_images=[RootImage(root=f"{width}x{height}")], request_duration_ms=float(compute_time) * 1000, - managed_service_id=PROVIDER, ) result = TextToImageOutput(response) diff --git a/scope3ai/tracers/huggingface/text_to_speech.py b/scope3ai/tracers/huggingface/text_to_speech.py index ba09544..ec0c071 100644 --- a/scope3ai/tracers/huggingface/text_to_speech.py +++ b/scope3ai/tracers/huggingface/text_to_speech.py @@ -50,7 +50,6 @@ def _hugging_face_text_to_speech_get_impact_row( input_tokens=int(input_tokens), task=Task.text_to_speech, request_duration_ms=float(compute_time) * 1000, - managed_service_id=PROVIDER, ) result = TextToSpeechOutput(audio=response, sampling_rate=16000) return result, scope3_row diff --git a/scope3ai/tracers/huggingface/translation.py b/scope3ai/tracers/huggingface/translation.py index 9be069b..423d12c 100644 --- a/scope3ai/tracers/huggingface/translation.py +++ b/scope3ai/tracers/huggingface/translation.py @@ -51,7 +51,6 @@ def _hugging_face_translation_get_impact_row( input_tokens=int(input_tokens), output_tokens=output_tokens, request_duration_ms=float(compute_time) * 1000, - managed_service_id=PROVIDER, ) result = TranslationOutput(**asdict(response)) return result, scope3_row diff --git a/scope3ai/tracers/huggingface/vision/image_classification.py b/scope3ai/tracers/huggingface/vision/image_classification.py index a0b477c..dc59c49 100644 --- a/scope3ai/tracers/huggingface/vision/image_classification.py +++ b/scope3ai/tracers/huggingface/vision/image_classification.py @@ -59,7 +59,6 @@ def _hugging_face_image_classification_get_impact_row( task=Task.image_classification, output_images=[], # No images to output in classification request_duration_ms=float(compute_time) * 1000, - managed_service_id=PROVIDER, input_images=input_images, ) result = ImageClassificationOutput(elements=response) diff --git a/scope3ai/tracers/huggingface/vision/image_segmentation.py b/scope3ai/tracers/huggingface/vision/image_segmentation.py index 817c554..1fdf56f 100644 --- a/scope3ai/tracers/huggingface/vision/image_segmentation.py +++ b/scope3ai/tracers/huggingface/vision/image_segmentation.py @@ -58,7 +58,6 @@ def _hugging_face_image_segmentation_get_impact_row( input_tokens=0, task=Task.image_segmentation, request_duration_ms=float(compute_time) * 1000, - managed_service_id=PROVIDER, input_images=input_images, ) result = ImageSegmentationOutput() diff --git a/scope3ai/tracers/huggingface/vision/object_detection.py b/scope3ai/tracers/huggingface/vision/object_detection.py index 533af97..eef2492 100644 --- a/scope3ai/tracers/huggingface/vision/object_detection.py +++ b/scope3ai/tracers/huggingface/vision/object_detection.py @@ -58,7 +58,6 @@ def _hugging_face_object_detection_get_impact_row( input_tokens=0, # No token usage for object detection task=Task.object_detection, request_duration_ms=float(compute_time) * 1000, - managed_service_id=PROVIDER, input_images=input_images, ) result = ObjectDetectionOutput(elements=response) diff --git a/scope3ai/tracers/litellm/chat.py b/scope3ai/tracers/litellm/chat.py index 3aece11..23e8af4 100644 --- a/scope3ai/tracers/litellm/chat.py +++ b/scope3ai/tracers/litellm/chat.py @@ -56,7 +56,6 @@ def litellm_chat_wrapper_stream( # type: ignore[misc] model_id=model, output_tokens=token_count, request_duration_ms=float(request_latency) * 
1000, - managed_service_id=PROVIDER, ) scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row) if scope3ai_ctx is not None: @@ -90,7 +89,6 @@ def litellm_chat_wrapper_non_stream( input_tokens=response.usage.prompt_tokens, output_tokens=response.usage.total_tokens, request_duration_ms=float(request_latency) * 1000, - managed_service_id=PROVIDER, ) if "audio" in modalities: audio_format = kwargs.get("audio", {}).get("format", "mp3") @@ -143,7 +141,6 @@ async def litellm_async_chat_wrapper_base( input_tokens=response.usage.prompt_tokens, output_tokens=response.usage.total_tokens, request_duration_ms=float(request_latency) * 1000, - managed_service_id=PROVIDER, ) if "audio" in modalities: audio_format = kwargs.get("audio", {}).get("format", "mp3") @@ -184,7 +181,6 @@ async def litellm_async_chat_wrapper_stream( # type: ignore[misc] model_id=model, output_tokens=token_count, request_duration_ms=float(request_latency) * 1000, - managed_service_id=PROVIDER, ) scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row) if scope3ai_ctx is not None: diff --git a/scope3ai/tracers/litellm/speech_to_text.py b/scope3ai/tracers/litellm/speech_to_text.py index 0aa203b..3fd4f55 100644 --- a/scope3ai/tracers/litellm/speech_to_text.py +++ b/scope3ai/tracers/litellm/speech_to_text.py @@ -39,7 +39,6 @@ def litellm_speech_to_text_get_impact_row( model_id=model, output_tokens=output_tokens, request_duration_ms=float(request_latency) * 1000, - managed_service_id=PROVIDER, task=Task.speech_to_text, **options, ) diff --git a/scope3ai/tracers/litellm/text_to_image.py b/scope3ai/tracers/litellm/text_to_image.py index b3c274b..ad1b24c 100644 --- a/scope3ai/tracers/litellm/text_to_image.py +++ b/scope3ai/tracers/litellm/text_to_image.py @@ -40,7 +40,6 @@ def litellm_image_generation_get_impact_row( model_id=model or DEFAULT_MODEL, task=Task.text_to_image, request_duration_ms=float(request_latency) * 1000, - managed_service_id=PROVIDER, output_images=[size] * n, input_tokens=input_tokens, ) diff --git a/scope3ai/tracers/litellm/text_to_speech.py b/scope3ai/tracers/litellm/text_to_speech.py index e861292..4ad3560 100644 --- a/scope3ai/tracers/litellm/text_to_speech.py +++ b/scope3ai/tracers/litellm/text_to_speech.py @@ -41,7 +41,6 @@ def litellm_speech_generation_get_impact_row( scope3_row = ImpactRow( model_id=model, request_duration_ms=float(request_latency) * 1000, - managed_service_id=PROVIDER, **options, ) return scope3_row diff --git a/scope3ai/tracers/mistralai/chat.py b/scope3ai/tracers/mistralai/chat.py index ca522f2..dee2fbd 100644 --- a/scope3ai/tracers/mistralai/chat.py +++ b/scope3ai/tracers/mistralai/chat.py @@ -41,7 +41,6 @@ def mistralai_v1_chat_wrapper( input_tokens=response.usage.prompt_tokens, output_tokens=response.usage.completion_tokens, request_duration_ms=request_latency * 1000, - managed_service_id=PROVIDER, ) scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row) messages = args[1] if len(args) > 1 else kwargs.get("messages") @@ -71,7 +70,6 @@ def mistralai_v1_chat_wrapper_stream( input_tokens=chunk.data.usage.prompt_tokens, output_tokens=chunk.data.usage.completion_tokens, request_duration_ms=request_latency * 1000, - managed_service_id=PROVIDER, ) scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row) chunk.data = CompletionChunk(**chunk.data.model_dump(), scope3ai=scope3ai_ctx) @@ -92,7 +90,6 @@ async def mistralai_v1_async_chat_wrapper( input_tokens=response.usage.prompt_tokens, output_tokens=response.usage.completion_tokens, 
request_duration_ms=request_latency * 1000, - managed_service_id=PROVIDER, ) scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row) chat = ChatCompletionResponse(**response.model_dump()) @@ -114,7 +111,6 @@ async def _generator( input_tokens=chunk.data.usage.prompt_tokens, output_tokens=chunk.data.usage.completion_tokens, request_duration_ms=request_latency * 1000, - managed_service_id=PROVIDER, ) scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row) chunk.data = CompletionChunk(**chunk.data.model_dump(), scope3ai=scope3ai_ctx) diff --git a/scope3ai/tracers/openai/chat.py b/scope3ai/tracers/openai/chat.py index 0bd29c6..66c88a6 100644 --- a/scope3ai/tracers/openai/chat.py +++ b/scope3ai/tracers/openai/chat.py @@ -47,7 +47,6 @@ def _openai_chat_wrapper( input_tokens=http_response.get("usage", {}).get("prompt_tokens"), output_tokens=http_response.get("usage", {}).get("completion_tokens"), request_duration_ms=request_latency * 1000, - managed_service_id=PROVIDER, ) if "audio" in modalities: audio_format = kwargs.get("audio", {}).get("format", "mp3") @@ -70,7 +69,6 @@ def _openai_chat_wrapper( input_tokens=response.usage.prompt_tokens, output_tokens=response.usage.completion_tokens, request_duration_ms=request_latency * 1000, - managed_service_id=PROVIDER, ) if "audio" in modalities: audio_format = kwargs.get("audio", {}).get("format", "mp3") @@ -138,7 +136,6 @@ def openai_chat_wrapper_stream( output_tokens=chunk.usage.completion_tokens, request_duration_ms=request_latency * 1000, # TODO: can we get the header that has the processing time - managed_service_id=PROVIDER, ) scope3_ctx = Scope3AI.get_instance().submit_impact(scope3_row) @@ -195,7 +192,6 @@ async def openai_async_chat_wrapper_stream( output_tokens=chunk.usage.completion_tokens, request_duration_ms=request_latency * 1000, # TODO: can we get the header that has the processing time - managed_service_id=PROVIDER, ) scope3_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row) diff --git a/scope3ai/tracers/openai/speech_to_text.py b/scope3ai/tracers/openai/speech_to_text.py index b8f5042..4a8f130 100644 --- a/scope3ai/tracers/openai/speech_to_text.py +++ b/scope3ai/tracers/openai/speech_to_text.py @@ -51,7 +51,6 @@ def _openai_speech_to_text_get_impact_row( scope3_row = ImpactRow( model_id=model, - managed_service_id=PROVIDER, output_tokens=output_tokens, request_duration_ms=request_latency, task=Task.speech_to_text, diff --git a/scope3ai/tracers/openai/text_to_image.py b/scope3ai/tracers/openai/text_to_image.py index 67050a8..0356f90 100644 --- a/scope3ai/tracers/openai/text_to_image.py +++ b/scope3ai/tracers/openai/text_to_image.py @@ -31,7 +31,6 @@ def _openai_image_get_impact_row( task=Task.text_to_image, output_images=[size] * n, request_duration_ms=request_latency * 1000, - managed_service_id=PROVIDER, ) result = ImageResponse.model_construct(**response.model_dump()) diff --git a/scope3ai/tracers/openai/text_to_speech.py b/scope3ai/tracers/openai/text_to_speech.py index cbfd51e..de0c361 100644 --- a/scope3ai/tracers/openai/text_to_speech.py +++ b/scope3ai/tracers/openai/text_to_speech.py @@ -45,7 +45,6 @@ def _openai_text_to_speech_get_impact_row( model_id=model_requested, input_tokens=input_tokens, request_duration_ms=request_latency, - managed_service_id=PROVIDER, output_audio_seconds=duration, task=Task.text_to_speech, ) diff --git a/scope3ai/tracers/openai/translation.py b/scope3ai/tracers/openai/translation.py index e6427e4..3947a7a 100644 --- a/scope3ai/tracers/openai/translation.py 
+++ b/scope3ai/tracers/openai/translation.py @@ -51,7 +51,6 @@ def _openai_translation_get_impact_row( scope3_row = ImpactRow( model_id=model, - managed_service_id=PROVIDER, output_tokens=output_tokens, request_duration_ms=request_latency, task=Task.translation, From aa2ec8a7a01329a5148e8c50d40d8e6a2b361d95 Mon Sep 17 00:00:00 2001 From: Kevin Guevara Date: Wed, 5 Feb 2025 18:09:17 -0500 Subject: [PATCH 14/14] fix: fix managed_service_id --- scope3ai/lib.py | 8 +++++++- scope3ai/tracers/anthropic/chat.py | 2 -- scope3ai/tracers/cohere/chat.py | 3 --- scope3ai/tracers/cohere/chat_v2.py | 3 --- scope3ai/tracers/google_genai/chat.py | 3 --- scope3ai/tracers/huggingface/chat.py | 2 -- scope3ai/tracers/huggingface/image_to_image.py | 2 -- scope3ai/tracers/huggingface/speech_to_text.py | 2 -- scope3ai/tracers/huggingface/text_to_image.py | 2 -- scope3ai/tracers/huggingface/text_to_speech.py | 2 -- scope3ai/tracers/huggingface/translation.py | 2 -- .../tracers/huggingface/vision/image_classification.py | 2 -- scope3ai/tracers/huggingface/vision/image_segmentation.py | 2 -- scope3ai/tracers/huggingface/vision/object_detection.py | 2 -- scope3ai/tracers/litellm/chat.py | 2 -- scope3ai/tracers/litellm/speech_to_text.py | 3 --- scope3ai/tracers/litellm/text_to_image.py | 2 -- scope3ai/tracers/litellm/text_to_speech.py | 3 --- scope3ai/tracers/mistralai/chat.py | 4 +--- scope3ai/tracers/openai/chat.py | 2 -- scope3ai/tracers/openai/speech_to_text.py | 2 -- scope3ai/tracers/openai/text_to_image.py | 2 -- scope3ai/tracers/openai/text_to_speech.py | 2 -- scope3ai/tracers/openai/translation.py | 2 -- 24 files changed, 8 insertions(+), 53 deletions(-) diff --git a/scope3ai/lib.py b/scope3ai/lib.py index a79cee5..8f75edd 100644 --- a/scope3ai/lib.py +++ b/scope3ai/lib.py @@ -423,7 +423,6 @@ def set_only_if(row, field, *values): return row.request_id = generate_id() - row.managed_service_id = "" if root_tracer: set_only_if(row, "trace_id", root_tracer.trace_id) if row.utc_datetime is None: @@ -437,6 +436,13 @@ def set_only_if(row, field, *values): ) # copy tracer or global metadata + + set_only_if( + row, + "managed_service_id", + row.managed_service_id if row.managed_service_id else "", + ) + set_only_if( row, "client_id", diff --git a/scope3ai/tracers/anthropic/chat.py b/scope3ai/tracers/anthropic/chat.py index 8bf5bb5..8a201c1 100644 --- a/scope3ai/tracers/anthropic/chat.py +++ b/scope3ai/tracers/anthropic/chat.py @@ -18,10 +18,8 @@ from typing_extensions import override from scope3ai.api.types import Scope3AIContext, ImpactRow -from scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI -PROVIDER = PROVIDERS.ANTROPIC.value MessageStreamT = TypeVar("MessageStreamT", bound=_MessageStream) AsyncMessageStreamT = TypeVar("AsyncMessageStreamT", bound=_AsyncMessageStream) diff --git a/scope3ai/tracers/cohere/chat.py b/scope3ai/tracers/cohere/chat.py index 67f6e33..473f93d 100644 --- a/scope3ai/tracers/cohere/chat.py +++ b/scope3ai/tracers/cohere/chat.py @@ -11,12 +11,9 @@ StreamEndStreamedChatResponse as _StreamEndStreamedChatResponse, ) -from scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI from scope3ai.api.types import Scope3AIContext, ImpactRow -PROVIDER = PROVIDERS.COHERE.value - class NonStreamedChatResponse(_NonStreamedChatResponse): scope3ai: Optional[Scope3AIContext] = None diff --git a/scope3ai/tracers/cohere/chat_v2.py b/scope3ai/tracers/cohere/chat_v2.py index 9996f69..e757db3 100644 --- a/scope3ai/tracers/cohere/chat_v2.py +++ b/scope3ai/tracers/cohere/chat_v2.py 
@@ -11,11 +11,8 @@ ) from scope3ai.api.types import ImpactRow, Scope3AIContext -from scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI -PROVIDER = PROVIDERS.COHERE.value - class ChatResponse(_ChatResponse): scope3ai: Optional[Scope3AIContext] = None diff --git a/scope3ai/tracers/google_genai/chat.py b/scope3ai/tracers/google_genai/chat.py index ea97634..2a523be 100644 --- a/scope3ai/tracers/google_genai/chat.py +++ b/scope3ai/tracers/google_genai/chat.py @@ -5,11 +5,8 @@ from scope3ai.api.types import Scope3AIContext from scope3ai.api.typesgen import ImpactRow -from scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI -PROVIDER = PROVIDERS.GOOGLE_GENAI.value - class GenerateContentResponse(_GenerateContentResponse): scope3ai: Optional[Scope3AIContext] = None diff --git a/scope3ai/tracers/huggingface/chat.py b/scope3ai/tracers/huggingface/chat.py index 069130f..02ea8cf 100644 --- a/scope3ai/tracers/huggingface/chat.py +++ b/scope3ai/tracers/huggingface/chat.py @@ -13,11 +13,9 @@ from requests import Response from scope3ai.api.types import ImpactRow, Scope3AIContext -from scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI from scope3ai.response_interceptor.requests_interceptor import requests_response_capture -PROVIDER = PROVIDERS.HUGGINGFACE_HUB.value HUGGING_FACE_CHAT_TASK = "chat" diff --git a/scope3ai/tracers/huggingface/image_to_image.py b/scope3ai/tracers/huggingface/image_to_image.py index 9631f8c..36adfc7 100644 --- a/scope3ai/tracers/huggingface/image_to_image.py +++ b/scope3ai/tracers/huggingface/image_to_image.py @@ -16,12 +16,10 @@ from scope3ai.api.types import ImpactRow, Scope3AIContext from scope3ai.api.typesgen import Image as RootImage from scope3ai.api.typesgen import Task -from scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI from scope3ai.response_interceptor.aiohttp_interceptor import aiohttp_response_capture from scope3ai.response_interceptor.requests_interceptor import requests_response_capture -PROVIDER = PROVIDERS.HUGGINGFACE_HUB.value HUGGING_FACE_IMAGE_TO_IMAGE_TASK = "chat" diff --git a/scope3ai/tracers/huggingface/speech_to_text.py b/scope3ai/tracers/huggingface/speech_to_text.py index ebab950..d211c92 100644 --- a/scope3ai/tracers/huggingface/speech_to_text.py +++ b/scope3ai/tracers/huggingface/speech_to_text.py @@ -14,12 +14,10 @@ from scope3ai.api.types import ImpactRow, Scope3AIContext from scope3ai.api.typesgen import Task -from scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI from scope3ai.response_interceptor.aiohttp_interceptor import aiohttp_response_capture from scope3ai.response_interceptor.requests_interceptor import requests_response_capture -PROVIDER = PROVIDERS.HUGGINGFACE_HUB.value HUGGING_FACE_SPEECH_TO_TEXT_TASK = "automatic-speech-recognition" diff --git a/scope3ai/tracers/huggingface/text_to_image.py b/scope3ai/tracers/huggingface/text_to_image.py index 2b16f09..0ead53b 100644 --- a/scope3ai/tracers/huggingface/text_to_image.py +++ b/scope3ai/tracers/huggingface/text_to_image.py @@ -14,12 +14,10 @@ from scope3ai.api.types import ImpactRow, Scope3AIContext from scope3ai.api.typesgen import Image as RootImage from scope3ai.api.typesgen import Task -from scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI from scope3ai.response_interceptor.aiohttp_interceptor import aiohttp_response_capture from scope3ai.response_interceptor.requests_interceptor import requests_response_capture -PROVIDER = PROVIDERS.HUGGINGFACE_HUB.value 
HUGGING_FACE_TEXT_TO_IMAGE_TASK = "text-to-image" diff --git a/scope3ai/tracers/huggingface/text_to_speech.py b/scope3ai/tracers/huggingface/text_to_speech.py index ec0c071..dc9de60 100644 --- a/scope3ai/tracers/huggingface/text_to_speech.py +++ b/scope3ai/tracers/huggingface/text_to_speech.py @@ -13,12 +13,10 @@ from scope3ai.api.types import ImpactRow, Scope3AIContext from scope3ai.api.typesgen import Task -from scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI from scope3ai.response_interceptor.aiohttp_interceptor import aiohttp_response_capture from scope3ai.response_interceptor.requests_interceptor import requests_response_capture -PROVIDER = PROVIDERS.HUGGINGFACE_HUB.value HUGGING_FACE_TEXT_TO_SPEECH_TASK = "text-to-speech" diff --git a/scope3ai/tracers/huggingface/translation.py b/scope3ai/tracers/huggingface/translation.py index 423d12c..0c0d154 100644 --- a/scope3ai/tracers/huggingface/translation.py +++ b/scope3ai/tracers/huggingface/translation.py @@ -13,12 +13,10 @@ from scope3ai.api.types import ImpactRow, Scope3AIContext from scope3ai.api.typesgen import Task -from scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI from scope3ai.response_interceptor.aiohttp_interceptor import aiohttp_response_capture from scope3ai.response_interceptor.requests_interceptor import requests_response_capture -PROVIDER = PROVIDERS.HUGGINGFACE_HUB.value HUGGING_FACE_TRANSLATION_TASK = "translation" diff --git a/scope3ai/tracers/huggingface/vision/image_classification.py b/scope3ai/tracers/huggingface/vision/image_classification.py index dc59c49..1db12fc 100644 --- a/scope3ai/tracers/huggingface/vision/image_classification.py +++ b/scope3ai/tracers/huggingface/vision/image_classification.py @@ -16,12 +16,10 @@ from scope3ai.api.types import ImpactRow, Scope3AIContext from scope3ai.api.typesgen import Image as RootImage from scope3ai.api.typesgen import Task -from scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI from scope3ai.response_interceptor.aiohttp_interceptor import aiohttp_response_capture from scope3ai.response_interceptor.requests_interceptor import requests_response_capture -PROVIDER = PROVIDERS.HUGGINGFACE_HUB.value HUGGING_FACE_IMAGE_CLASSIFICATION_TASK = "image-classification" diff --git a/scope3ai/tracers/huggingface/vision/image_segmentation.py b/scope3ai/tracers/huggingface/vision/image_segmentation.py index 1fdf56f..456414c 100644 --- a/scope3ai/tracers/huggingface/vision/image_segmentation.py +++ b/scope3ai/tracers/huggingface/vision/image_segmentation.py @@ -16,12 +16,10 @@ from scope3ai.api.types import ImpactRow, Scope3AIContext from scope3ai.api.typesgen import Image as RootImage from scope3ai.api.typesgen import Task -from scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI from scope3ai.response_interceptor.aiohttp_interceptor import aiohttp_response_capture from scope3ai.response_interceptor.requests_interceptor import requests_response_capture -PROVIDER = PROVIDERS.HUGGINGFACE_HUB.value HUGGING_FACE_IMAGE_SEGMENTATION_TASK = "image-segmentation" diff --git a/scope3ai/tracers/huggingface/vision/object_detection.py b/scope3ai/tracers/huggingface/vision/object_detection.py index eef2492..b826c37 100644 --- a/scope3ai/tracers/huggingface/vision/object_detection.py +++ b/scope3ai/tracers/huggingface/vision/object_detection.py @@ -16,12 +16,10 @@ from scope3ai.api.types import ImpactRow, Scope3AIContext from scope3ai.api.typesgen import Image as RootImage from scope3ai.api.typesgen import Task -from 
scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI from scope3ai.response_interceptor.aiohttp_interceptor import aiohttp_response_capture from scope3ai.response_interceptor.requests_interceptor import requests_response_capture -PROVIDER = PROVIDERS.HUGGINGFACE_HUB.value HUGGING_FACE_OBJECT_DETECTION_TASK = "object-detection" diff --git a/scope3ai/tracers/litellm/chat.py b/scope3ai/tracers/litellm/chat.py index 23e8af4..8efc4d7 100644 --- a/scope3ai/tracers/litellm/chat.py +++ b/scope3ai/tracers/litellm/chat.py @@ -8,13 +8,11 @@ from scope3ai import Scope3AI from scope3ai.api.types import Scope3AIContext, ImpactRow -from scope3ai.constants import PROVIDERS from scope3ai.tracers.utils.multimodal import ( aggregate_multimodal, aggregate_multimodal_audio_content_output, ) -PROVIDER = PROVIDERS.LITELLM.value logger = logging.getLogger("scope3ai.tracers.litellm.chat") diff --git a/scope3ai/tracers/litellm/speech_to_text.py b/scope3ai/tracers/litellm/speech_to_text.py index 3fd4f55..e7c7001 100644 --- a/scope3ai/tracers/litellm/speech_to_text.py +++ b/scope3ai/tracers/litellm/speech_to_text.py @@ -9,11 +9,8 @@ from scope3ai.api.types import ImpactRow from scope3ai.api.types import Scope3AIContext from scope3ai.api.typesgen import Task -from scope3ai.constants import PROVIDERS from scope3ai.tracers.utils.audio import _get_file_audio_duration -PROVIDER = PROVIDERS.LITELLM.value - class TranscriptionResponse(_TranscriptionResponse): scope3ai: Optional[Scope3AIContext] = None diff --git a/scope3ai/tracers/litellm/text_to_image.py b/scope3ai/tracers/litellm/text_to_image.py index ad1b24c..a92358d 100644 --- a/scope3ai/tracers/litellm/text_to_image.py +++ b/scope3ai/tracers/litellm/text_to_image.py @@ -8,9 +8,7 @@ from scope3ai import Scope3AI from scope3ai.api.types import ImpactRow, Scope3AIContext from scope3ai.api.typesgen import Image as RootImage, Task -from scope3ai.constants import PROVIDERS -PROVIDER = PROVIDERS.LITELLM.value DEFAULT_MODEL = "dall-e-2" DEFAULT_SIZE = "1024x1024" DEFAULT_N = 1 diff --git a/scope3ai/tracers/litellm/text_to_speech.py b/scope3ai/tracers/litellm/text_to_speech.py index 4ad3560..7324982 100644 --- a/scope3ai/tracers/litellm/text_to_speech.py +++ b/scope3ai/tracers/litellm/text_to_speech.py @@ -7,11 +7,8 @@ from scope3ai import Scope3AI from scope3ai.api.types import ImpactRow, Scope3AIContext -from scope3ai.constants import PROVIDERS from scope3ai.tracers.utils.audio import _get_audio_duration -PROVIDER = PROVIDERS.LITELLM.value - class HttpxBinaryResponseContent(_legacy_response.HttpxBinaryResponseContent): scope3ai: Optional[Scope3AIContext] = None diff --git a/scope3ai/tracers/mistralai/chat.py b/scope3ai/tracers/mistralai/chat.py index dee2fbd..aa8935b 100644 --- a/scope3ai/tracers/mistralai/chat.py +++ b/scope3ai/tracers/mistralai/chat.py @@ -11,11 +11,9 @@ from scope3ai import Scope3AI from scope3ai.api.types import Scope3AIContext from scope3ai.api.typesgen import ImpactRow -from scope3ai.constants import PROVIDERS from scope3ai.tracers.utils.multimodal import aggregate_multimodal -PROVIDER = PROVIDERS.MISTRALAI.value -PROVIDER = "" + logger = logging.getLogger("scope3ai.tracers.mistralai.chat") diff --git a/scope3ai/tracers/openai/chat.py b/scope3ai/tracers/openai/chat.py index 66c88a6..9543549 100644 --- a/scope3ai/tracers/openai/chat.py +++ b/scope3ai/tracers/openai/chat.py @@ -9,14 +9,12 @@ from openai.types.chat import ChatCompletionChunk as _ChatCompletionChunk from scope3ai.api.types import ImpactRow, Scope3AIContext -from 
scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI from scope3ai.tracers.utils.multimodal import ( aggregate_multimodal, aggregate_multimodal_audio_content_output, ) -PROVIDER = PROVIDERS.OPENAI.value logger = logging.getLogger("scope3ai.tracers.openai.chat") diff --git a/scope3ai/tracers/openai/speech_to_text.py b/scope3ai/tracers/openai/speech_to_text.py index 4a8f130..31410e8 100644 --- a/scope3ai/tracers/openai/speech_to_text.py +++ b/scope3ai/tracers/openai/speech_to_text.py @@ -10,12 +10,10 @@ ) from scope3ai.api.types import ImpactRow, Scope3AIContext, Task -from scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI from scope3ai.tracers.openai.utils import BaseModelResponse from scope3ai.tracers.utils.audio import _get_file_audio_duration -PROVIDER = PROVIDERS.OPENAI.value logger = logging.getLogger("scope3.tracers.openai.speech_to_text") diff --git a/scope3ai/tracers/openai/text_to_image.py b/scope3ai/tracers/openai/text_to_image.py index 0356f90..41eb0a0 100644 --- a/scope3ai/tracers/openai/text_to_image.py +++ b/scope3ai/tracers/openai/text_to_image.py @@ -5,11 +5,9 @@ from openai.types.images_response import ImagesResponse as _ImageResponse from scope3ai.api.types import ImpactRow, Scope3AIContext, Task from scope3ai.api.typesgen import Image as RootImage -from scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI from scope3ai.tracers.openai.utils import BaseModelResponse -PROVIDER = PROVIDERS.OPENAI.value DEFAULT_MODEL = "dall-e-2" DEFAULT_SIZE = "1024x1024" DEFAULT_N = 1 diff --git a/scope3ai/tracers/openai/text_to_speech.py b/scope3ai/tracers/openai/text_to_speech.py index de0c361..a8f4187 100644 --- a/scope3ai/tracers/openai/text_to_speech.py +++ b/scope3ai/tracers/openai/text_to_speech.py @@ -6,12 +6,10 @@ from openai.resources.audio.speech import AsyncSpeech, Speech, _legacy_response from scope3ai.api.types import ImpactRow, Scope3AIContext, Task -from scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI from scope3ai.tracers.openai.utils import BaseModelResponse from scope3ai.tracers.utils.audio import _get_audio_duration -PROVIDER = PROVIDERS.OPENAI.value logger = logging.getLogger(f"scope3ai.tracers.{__name__}") diff --git a/scope3ai/tracers/openai/translation.py b/scope3ai/tracers/openai/translation.py index 3947a7a..6084c26 100644 --- a/scope3ai/tracers/openai/translation.py +++ b/scope3ai/tracers/openai/translation.py @@ -10,12 +10,10 @@ ) from scope3ai.api.types import ImpactRow, Scope3AIContext, Task -from scope3ai.constants import PROVIDERS from scope3ai.lib import Scope3AI from scope3ai.tracers.openai.utils import BaseModelResponse from scope3ai.tracers.utils.audio import _get_file_audio_duration -PROVIDER = PROVIDERS.OPENAI.value logger = logging.getLogger(__name__)
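
A note on the return-annotation change made earlier in this series in scope3ai/tracers/openai/speech_to_text.py and text_to_image.py: an annotation written as "-> (Union[...], ImpactRow)" is accepted by the interpreter, but it evaluates to an ordinary tuple object rather than a type, so static checkers such as mypy reject it; typing.Tuple[...] is the standard spelling. A minimal illustration, with placeholder function names that are not from this codebase:

    from typing import Tuple

    def bad() -> (int, str):  # runs, but the annotation is just the tuple object (int, str)
        return 1, "a"

    def good() -> Tuple[int, str]:  # a real generic type that checkers understand
        return 1, "a"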
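
For readers following the managed_service_id changes across patches 12-14: the per-tracer PROVIDER constants are removed, and scope3ai/lib.py now routes managed_service_id through the same set_only_if helper already used for trace_id and client_id, so a value set explicitly on an ImpactRow is preserved while unset rows fall back to "". The helper's implementation is not part of these diffs; the sketch below assumes it assigns the first candidate value that is not None:

    # Standalone sketch of the defaulting pattern added in patch 14. The
    # real set_only_if lives in scope3ai/lib.py and is not shown in this
    # series; "take the first non-None candidate" is an assumption here.
    from dataclasses import dataclass
    from typing import Any, Optional

    @dataclass
    class Row:
        managed_service_id: Optional[str] = None

    def set_only_if(row: Any, field: str, *values: Any) -> None:
        # Assign the first candidate that is not None, otherwise leave
        # the field untouched.
        for value in values:
            if value is not None:
                setattr(row, field, value)
                return

    row = Row(managed_service_id="litellm")
    # Patch 14 replaces the unconditional row.managed_service_id = ""
    # with this call, so an explicit value now survives submission:
    set_only_if(
        row,
        "managed_service_id",
        row.managed_service_id if row.managed_service_id else "",
    )
    assert row.managed_service_id == "litellm"

Until the AiApi accepts provider identifiers again, rows that do not set the field explicitly are submitted with an empty managed_service_id, which is why the corresponding asserts in the litellm tests are left as TODOs.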